Imported Debian version 2.4.3~trusty1
[deb_ffmpeg.git] / ffmpeg / libavcodec / mpegvideo.c
CommitLineData
2ba45a60
DM
1/*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of FFmpeg.
9 *
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25/**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30#include "libavutil/attributes.h"
31#include "libavutil/avassert.h"
32#include "libavutil/imgutils.h"
33#include "libavutil/internal.h"
34#include "libavutil/motion_vector.h"
35#include "libavutil/timer.h"
36#include "avcodec.h"
37#include "blockdsp.h"
38#include "h264chroma.h"
39#include "idctdsp.h"
40#include "internal.h"
41#include "mathops.h"
42#include "mpegutils.h"
43#include "mpegvideo.h"
44#include "mjpegenc.h"
45#include "msmpeg4.h"
46#include "qpeldsp.h"
47#include "thread.h"
48#include <limits.h>
49
/* Default chroma quantizer mapping: chroma qscale equals luma qscale
 * (identity table, indexed by qscale 0..31). Codecs with a nonlinear
 * chroma quantizer install their own table instead. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
55
/* MPEG-1 style DC scale: constant 8 for every qscale value (also entry 0
 * of ff_mpeg2_dc_scale_table below). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
};
67
/* Constant DC scale of 4 (entry 1 of ff_mpeg2_dc_scale_table below). */
static const uint8_t mpeg2_dc_scale_table1[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
};
79
/* Constant DC scale of 2 (entry 2 of ff_mpeg2_dc_scale_table below). */
static const uint8_t mpeg2_dc_scale_table2[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
};
91
/* Constant DC scale of 1, i.e. lossless DC (entry 3 of
 * ff_mpeg2_dc_scale_table below). */
static const uint8_t mpeg2_dc_scale_table3[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
};
103
/* DC scale tables halving from 8 down to 1; presumably indexed by the
 * stream's intra DC precision — confirm at the call sites. */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
110
/* Alternate horizontal coefficient scan order (installed into
 * intra_h_scantable by ff_mpv_idct_init() below). */
const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};
121
/* Alternate vertical coefficient scan order; used both for
 * intra_v_scantable and as the inter/intra scan when alternate_scan is
 * set (see ff_mpv_idct_init() below). */
const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};
132
133static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
134 int16_t *block, int n, int qscale)
135{
136 int i, level, nCoeffs;
137 const uint16_t *quant_matrix;
138
139 nCoeffs= s->block_last_index[n];
140
141 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
142 /* XXX: only mpeg1 */
143 quant_matrix = s->intra_matrix;
144 for(i=1;i<=nCoeffs;i++) {
145 int j= s->intra_scantable.permutated[i];
146 level = block[j];
147 if (level) {
148 if (level < 0) {
149 level = -level;
150 level = (int)(level * qscale * quant_matrix[j]) >> 3;
151 level = (level - 1) | 1;
152 level = -level;
153 } else {
154 level = (int)(level * qscale * quant_matrix[j]) >> 3;
155 level = (level - 1) | 1;
156 }
157 block[j] = level;
158 }
159 }
160}
161
162static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
163 int16_t *block, int n, int qscale)
164{
165 int i, level, nCoeffs;
166 const uint16_t *quant_matrix;
167
168 nCoeffs= s->block_last_index[n];
169
170 quant_matrix = s->inter_matrix;
171 for(i=0; i<=nCoeffs; i++) {
172 int j= s->intra_scantable.permutated[i];
173 level = block[j];
174 if (level) {
175 if (level < 0) {
176 level = -level;
177 level = (((level << 1) + 1) * qscale *
178 ((int) (quant_matrix[j]))) >> 4;
179 level = (level - 1) | 1;
180 level = -level;
181 } else {
182 level = (((level << 1) + 1) * qscale *
183 ((int) (quant_matrix[j]))) >> 4;
184 level = (level - 1) | 1;
185 }
186 block[j] = level;
187 }
188 }
189}
190
191static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
192 int16_t *block, int n, int qscale)
193{
194 int i, level, nCoeffs;
195 const uint16_t *quant_matrix;
196
197 if(s->alternate_scan) nCoeffs= 63;
198 else nCoeffs= s->block_last_index[n];
199
200 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
201 quant_matrix = s->intra_matrix;
202 for(i=1;i<=nCoeffs;i++) {
203 int j= s->intra_scantable.permutated[i];
204 level = block[j];
205 if (level) {
206 if (level < 0) {
207 level = -level;
208 level = (int)(level * qscale * quant_matrix[j]) >> 3;
209 level = -level;
210 } else {
211 level = (int)(level * qscale * quant_matrix[j]) >> 3;
212 }
213 block[j] = level;
214 }
215 }
216}
217
218static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
219 int16_t *block, int n, int qscale)
220{
221 int i, level, nCoeffs;
222 const uint16_t *quant_matrix;
223 int sum=-1;
224
225 if(s->alternate_scan) nCoeffs= 63;
226 else nCoeffs= s->block_last_index[n];
227
228 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
229 sum += block[0];
230 quant_matrix = s->intra_matrix;
231 for(i=1;i<=nCoeffs;i++) {
232 int j= s->intra_scantable.permutated[i];
233 level = block[j];
234 if (level) {
235 if (level < 0) {
236 level = -level;
237 level = (int)(level * qscale * quant_matrix[j]) >> 3;
238 level = -level;
239 } else {
240 level = (int)(level * qscale * quant_matrix[j]) >> 3;
241 }
242 block[j] = level;
243 sum+=level;
244 }
245 }
246 block[63]^=sum&1;
247}
248
249static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
250 int16_t *block, int n, int qscale)
251{
252 int i, level, nCoeffs;
253 const uint16_t *quant_matrix;
254 int sum=-1;
255
256 if(s->alternate_scan) nCoeffs= 63;
257 else nCoeffs= s->block_last_index[n];
258
259 quant_matrix = s->inter_matrix;
260 for(i=0; i<=nCoeffs; i++) {
261 int j= s->intra_scantable.permutated[i];
262 level = block[j];
263 if (level) {
264 if (level < 0) {
265 level = -level;
266 level = (((level << 1) + 1) * qscale *
267 ((int) (quant_matrix[j]))) >> 4;
268 level = -level;
269 } else {
270 level = (((level << 1) + 1) * qscale *
271 ((int) (quant_matrix[j]))) >> 4;
272 }
273 block[j] = level;
274 sum+=level;
275 }
276 }
277 block[63]^=sum&1;
278}
279
280static void dct_unquantize_h263_intra_c(MpegEncContext *s,
281 int16_t *block, int n, int qscale)
282{
283 int i, level, qmul, qadd;
284 int nCoeffs;
285
286 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
287
288 qmul = qscale << 1;
289
290 if (!s->h263_aic) {
291 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
292 qadd = (qscale - 1) | 1;
293 }else{
294 qadd = 0;
295 }
296 if(s->ac_pred)
297 nCoeffs=63;
298 else
299 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
300
301 for(i=1; i<=nCoeffs; i++) {
302 level = block[i];
303 if (level) {
304 if (level < 0) {
305 level = level * qmul - qadd;
306 } else {
307 level = level * qmul + qadd;
308 }
309 block[i] = level;
310 }
311 }
312}
313
314static void dct_unquantize_h263_inter_c(MpegEncContext *s,
315 int16_t *block, int n, int qscale)
316{
317 int i, level, qmul, qadd;
318 int nCoeffs;
319
320 av_assert2(s->block_last_index[n]>=0);
321
322 qadd = (qscale - 1) | 1;
323 qmul = qscale << 1;
324
325 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
326
327 for(i=0; i<=nCoeffs; i++) {
328 level = block[i];
329 if (level) {
330 if (level < 0) {
331 level = level * qmul - qadd;
332 } else {
333 level = level * qmul + qadd;
334 }
335 block[i] = level;
336 }
337 }
338}
339
/* Error-resilience callback: reconstruct one macroblock from the motion
 * parameters the ER code guessed/copied. Loads the MB state into the
 * context, points dest[] at the MB's position in the current frame, then
 * reuses the normal MB reconstruction path. */
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    /* Publish the MB parameters that ff_mpv_decode_mb() reads. */
    s->mv_dir = mv_dir;
    s->mv_type = mv_type;
    s->mb_intra = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x = mb_x;
    s->mb_y = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    ff_init_block_index(s);
    ff_update_block_index(s);

    s->bdsp.clear_blocks(s->block[0]);

    /* Luma MB is 16x16; the chroma MB size follows the subsampling shifts. */
    s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    if (ref)
        av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
    ff_mpv_decode_mb(s, s->block);
}
367
/* Debug stub installed in place of 16-pixel-wide motion compensation
 * (-debug nomc): fills h rows of 16 bytes with mid-gray (128). src is
 * intentionally unused; the signature must match the hpeldsp tables. */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;
    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 16);
}
373
/* Debug stub installed in place of 8-pixel-wide motion compensation
 * (-debug nomc): fills h rows of 8 bytes with mid-gray (128). src is
 * intentionally unused; the signature must match the hpeldsp tables. */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;
    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 8);
}
379
380/* init common dct for both encoder and decoder */
static av_cold int dct_init(MpegEncContext *s)
{
    /* Initialize the DSP helper contexts shared by encoder and decoder. */
    ff_blockdsp_init(&s->bdsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_me_cmp_init(&s->mecc, s->avctx);
    ff_mpegvideodsp_init(&s->mdsp);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    /* With -debug nomc, replace all motion-compensation routines with
     * stubs that paint mid-gray, so only the DCT residue is visible. */
    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        for (i=0; i<4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }

    /* Install the C reference dequantizers; bitexact mode selects the
     * MPEG-2 intra variant that performs mismatch control on block[63]. */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* Let architecture-specific code override the C versions where
     * optimized implementations exist (compile-time selected). */
    if (HAVE_INTRINSICS_NEON)
        ff_mpv_common_init_neon(s);

    if (ARCH_ALPHA)
        ff_mpv_common_init_axp(s);
    if (ARCH_ARM)
        ff_mpv_common_init_arm(s);
    if (ARCH_PPC)
        ff_mpv_common_init_ppc(s);
    if (ARCH_X86)
        ff_mpv_common_init_x86(s);

    return 0;
}
426
/**
 * Initialize the IDCT context and build the permutated scan tables
 * matching the IDCT's coefficient permutation.
 */
av_cold void ff_mpv_idct_init(MpegEncContext *s)
{
    ff_idctdsp_init(&s->idsp, s->avctx);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    /* The alternate horizontal/vertical tables are always prepared so a
     * codec can select them per picture without re-running this init. */
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}
444
/**
 * Allocate the linesize-dependent scratch buffers: the edge-emulation
 * buffer and the motion-estimation scratchpad (which is shared by the
 * rd/b/obmc scratchpads). No-op when a hwaccel owns the frame storage.
 *
 * @return 0 on success, AVERROR(ENOMEM) or AVERROR_PATCHWELCOME on failure.
 */
static int frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);

    if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
        return 0;

    if (linesize < 24) {
        av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
        return AVERROR_PATCHWELCOME;
    }

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for  halfpel / 21x21 for  h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
    FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68,
                            fail);

    FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2,
                            fail)
    /* All four scratchpads alias the same allocation; obmc is offset by
     * 16 bytes into it. */
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
478
/**
 * Allocate a frame buffer for the given Picture.
 *
 * For encoding, the buffer is over-allocated by EDGE_WIDTH on every side
 * and the data pointers are then advanced past the edges, so the visible
 * area matches avctx->width/height. The WM image/screen codecs bypass the
 * user get_buffer callback because they use internal dimensions.
 *
 * @return 0 on success, negative on failure.
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int edges_needed = av_codec_is_encoder(s->avctx->codec);
    int r, ret;

    pic->tf.f = pic->f;
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2) {
        if (edges_needed) {
            /* Request padded dimensions; the pointers are fixed up below. */
            pic->f->width  = s->avctx->width  + 2 * EDGE_WIDTH;
            pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
        }

        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    } else {
        /* Internal allocation with codec-private dimensions/format. */
        pic->f->width  = s->avctx->width;
        pic->f->height = s->avctx->height;
        pic->f->format = s->avctx->pix_fmt;
        r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
    }

    if (r < 0 || !pic->f->buf[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f->data[0]);
        return -1;
    }

    if (edges_needed) {
        int i;
        /* Advance each plane pointer past the left/top edge; chroma planes
         * (i > 0) use the subsampled edge size. */
        for (i = 0; pic->f->data[i]; i++) {
            int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
                         pic->f->linesize[i] +
                         (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
            pic->f->data[i] += offset;
        }
        pic->f->width  = s->avctx->width;
        pic->f->height = s->avctx->height;
    }

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->frame_priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    /* The rest of the code assumes the strides stay constant between
     * frames; reject buffers that change them. */
    if (s->linesize && (s->linesize   != pic->f->linesize[0] ||
                        s->uvlinesize != pic->f->linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (pic->f->linesize[1] != pic->f->linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    /* Lazily allocate the linesize-dependent scratch buffers now that the
     * actual stride is known. */
    if (!s->edge_emu_buffer &&
        (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s, pic);
        return ret;
    }

    return 0;
}
560
/**
 * Unreference all per-picture side-data tables (variance/qscale/mb-type/
 * motion buffers) and reset the recorded allocation dimensions so the
 * tables are reallocated with current sizes on the next ff_alloc_picture().
 */
void ff_free_picture_tables(Picture *pic)
{
    int i;

    pic->alloc_mb_width  =
    pic->alloc_mb_height = 0;

    av_buffer_unref(&pic->mb_var_buf);
    av_buffer_unref(&pic->mc_mb_var_buf);
    av_buffer_unref(&pic->mb_mean_buf);
    av_buffer_unref(&pic->mbskip_table_buf);
    av_buffer_unref(&pic->qscale_table_buf);
    av_buffer_unref(&pic->mb_type_buf);

    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->motion_val_buf[i]);
        av_buffer_unref(&pic->ref_index_buf[i]);
    }
}
580
/**
 * Allocate the reference-counted per-picture side-data tables sized for
 * the current macroblock geometry. Encoder-only statistics tables and the
 * motion tables are allocated conditionally.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure (partially
 *         allocated buffers are left for the caller to free).
 */
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;


    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    /* Variance/mean statistics are only needed by the encoder's rate
     * control and mb decision. */
    if (s->encoding) {
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    /* Motion vector / reference index tables: needed for H.263-family
     * output, for encoding, and when MV visualization/export is requested. */
    if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
        (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    /* Remember the geometry so ff_alloc_picture() can detect size changes. */
    pic->alloc_mb_width  = s->mb_width;
    pic->alloc_mb_height = s->mb_height;

    return 0;
}
622
/**
 * Ensure every allocated side-data table of the picture is writable
 * (copying any buffer that is shared with another reference).
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int make_tables_writable(Picture *pic)
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
       (ret = av_buffer_make_writable(&pic->table)) < 0)\
    return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}
647
/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0.
 * Side-data tables are (re)allocated if missing or sized for a different
 * macroblock geometry, and made writable otherwise.
 *
 * @return 0 on success, negative on failure (pic is unreferenced).
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    int i, ret;

    /* Drop tables that were allocated for different dimensions. */
    if (pic->qscale_table_buf)
        if (   pic->alloc_mb_width  != s->mb_width
            || pic->alloc_mb_height != s->mb_height)
            ff_free_picture_tables(pic);

    if (shared) {
        av_assert0(pic->f->data[0]);
        pic->shared = 1;
    } else {
        av_assert0(!pic->f->buf[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        s->linesize   = pic->f->linesize[0];
        s->uvlinesize = pic->f->linesize[1];
    }

    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(s, pic);
    else
        ret = make_tables_writable(pic);
    if (ret < 0)
        goto fail;

    if (s->encoding) {
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
        pic->mb_mean   = pic->mb_mean_buf->data;
    }

    pic->mbskip_table = pic->mbskip_table_buf->data;
    /* qscale/mb_type pointers skip the first MB row plus one entry,
     * presumably to allow edge accesses at negative offsets — confirm
     * against the users of these tables. */
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;

    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(s, pic);
    ff_free_picture_tables(pic);
    return AVERROR(ENOMEM);
}
705
/**
 * Deallocate a picture: release the frame buffer (through the codec's
 * release path or av_frame_unref for the WM image codecs), drop the
 * hwaccel buffer, and clear the non-buffer state.
 */
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
{
    /* Everything located after mb_mean in Picture is plain state that is
     * safe to zero wholesale; the reference-counted buffers live before
     * that point and are released explicitly above. */
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else if (pic->f)
        av_frame_unref(pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    if (pic->needs_realloc)
        ff_free_picture_tables(pic);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
730
/**
 * Make dst's side-data table references point at the same buffers as
 * src's (re-referencing only where the underlying buffer differs), then
 * copy the derived data pointers and allocation dimensions.
 *
 * @return 0 on success, AVERROR(ENOMEM) on a failed buffer ref (dst's
 *         tables are freed in that case).
 */
static int update_picture_tables(Picture *dst, Picture *src)
{
     int i;

#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            ff_free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    /* The derived pointers into the buffers can be copied directly since
     * both pictures now share the same underlying data. */
    dst->mb_var        = src->mb_var;
    dst->mc_mb_var     = src->mc_mb_var;
    dst->mb_mean       = src->mb_mean;
    dst->mbskip_table  = src->mbskip_table;
    dst->qscale_table  = src->qscale_table;
    dst->mb_type       = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    dst->alloc_mb_width  = src->alloc_mb_width;
    dst->alloc_mb_height = src->alloc_mb_height;

    return 0;
}
775
776int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
777{
778 int ret;
779
780 av_assert0(!dst->f->buf[0]);
781 av_assert0(src->f->buf[0]);
782
783 src->tf.f = src->f;
784 dst->tf.f = dst->f;
785 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
786 if (ret < 0)
787 goto fail;
788
789 ret = update_picture_tables(dst, src);
790 if (ret < 0)
791 goto fail;
792
793 if (src->hwaccel_picture_private) {
794 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
795 if (!dst->hwaccel_priv_buf)
796 goto fail;
797 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
798 }
799
800 dst->field_picture = src->field_picture;
801 dst->mb_var_sum = src->mb_var_sum;
802 dst->mc_mb_var_sum = src->mc_mb_var_sum;
803 dst->b_frame_score = src->b_frame_score;
804 dst->needs_realloc = src->needs_realloc;
805 dst->reference = src->reference;
806 dst->shared = src->shared;
807
808 return 0;
809fail:
810 ff_mpeg_unref_picture(s, dst);
811 return ret;
812}
813
814static void exchange_uv(MpegEncContext *s)
815{
816 int16_t (*tmp)[64];
817
818 tmp = s->pblocks[4];
819 s->pblocks[4] = s->pblocks[5];
820 s->pblocks[5] = tmp;
821}
822
/**
 * Allocate the buffers that exist once per slice/thread context: the
 * motion-estimation maps (encoder only), the DCT block storage, and the
 * AC prediction values for H.263-style codecs. The linesize-dependent
 * scratch buffers are NOT allocated here; frame_size_alloc() does that
 * lazily once the stride is known.
 *
 * @return 0 on success, -1 on allocation failure (the caller frees
 *         through ff_mpv_common_end()).
 */
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }
    /* VCR2 stores chroma blocks in U/V-swapped order. */
    if (s->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(s);

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_mpv_common_end()
}
872
/**
 * Free the per-slice-context buffers allocated by init_duplicate_context()
 * and frame_size_alloc(). The aliased scratchpads (me.temp, rd/b/obmc)
 * share me.scratchpad's allocation, so only the pointers are cleared.
 */
static void free_duplicate_context(MpegEncContext *s)
{
    if (!s)
        return;

    av_freep(&s->edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp =
    s->rd_scratchpad =
    s->b_scratchpad =
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}
892
/**
 * Copy the per-thread fields of src into bak. Used by
 * ff_update_duplicate_context() to preserve a slice context's own
 * buffers/state across a wholesale memcpy of the MpegEncContext.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
919
/**
 * Refresh a slice (thread) context from the master context: copy the
 * whole MpegEncContext while preserving dst's own per-thread buffers and
 * state, then fix up the pointers that must stay thread-local.
 *
 * @return 0 on success, negative on scratch-buffer allocation failure.
 */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block storage, not src's. */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(dst);
    if (!dst->edge_emu_buffer &&
        (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
    return 0;
}
944
945int ff_mpeg_update_thread_context(AVCodecContext *dst,
946 const AVCodecContext *src)
947{
948 int i, ret;
949 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
950
951 if (dst == src)
952 return 0;
953
954 av_assert0(s != s1);
955
956 // FIXME can parameters change on I-frames?
957 // in that case dst may need a reinit
958 if (!s->context_initialized) {
959 memcpy(s, s1, sizeof(MpegEncContext));
960
961 s->avctx = dst;
962 s->bitstream_buffer = NULL;
963 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
964
965 if (s1->context_initialized){
966// s->picture_range_start += MAX_PICTURE_COUNT;
967// s->picture_range_end += MAX_PICTURE_COUNT;
968 ff_mpv_idct_init(s);
969 if((ret = ff_mpv_common_init(s)) < 0){
970 memset(s, 0, sizeof(MpegEncContext));
971 s->avctx = dst;
972 return ret;
973 }
974 }
975 }
976
977 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
978 s->context_reinit = 0;
979 s->height = s1->height;
980 s->width = s1->width;
981 if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
982 return ret;
983 }
984
985 s->avctx->coded_height = s1->avctx->coded_height;
986 s->avctx->coded_width = s1->avctx->coded_width;
987 s->avctx->width = s1->avctx->width;
988 s->avctx->height = s1->avctx->height;
989
990 s->coded_picture_number = s1->coded_picture_number;
991 s->picture_number = s1->picture_number;
992
993 av_assert0(!s->picture || s->picture != s1->picture);
994 if(s->picture)
995 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
996 ff_mpeg_unref_picture(s, &s->picture[i]);
997 if (s1->picture[i].f->buf[0] &&
998 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
999 return ret;
1000 }
1001
1002#define UPDATE_PICTURE(pic)\
1003do {\
1004 ff_mpeg_unref_picture(s, &s->pic);\
1005 if (s1->pic.f && s1->pic.f->buf[0])\
1006 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
1007 else\
1008 ret = update_picture_tables(&s->pic, &s1->pic);\
1009 if (ret < 0)\
1010 return ret;\
1011} while (0)
1012
1013 UPDATE_PICTURE(current_picture);
1014 UPDATE_PICTURE(last_picture);
1015 UPDATE_PICTURE(next_picture);
1016
1017 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1018 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1019 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1020
1021 // Error/bug resilience
1022 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1023 s->workaround_bugs = s1->workaround_bugs;
1024 s->padding_bug_score = s1->padding_bug_score;
1025
1026 // MPEG4 timing info
1027 memcpy(&s->last_time_base, &s1->last_time_base,
1028 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1029 (char *) &s1->last_time_base);
1030
1031 // B-frame info
1032 s->max_b_frames = s1->max_b_frames;
1033 s->low_delay = s1->low_delay;
1034 s->droppable = s1->droppable;
1035
1036 // DivX handling (doesn't work)
1037 s->divx_packed = s1->divx_packed;
1038
1039 if (s1->bitstream_buffer) {
1040 if (s1->bitstream_buffer_size +
1041 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1042 av_fast_malloc(&s->bitstream_buffer,
1043 &s->allocated_bitstream_buffer_size,
1044 s1->allocated_bitstream_buffer_size);
1045 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1046 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1047 s1->bitstream_buffer_size);
1048 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1049 FF_INPUT_BUFFER_PADDING_SIZE);
1050 }
1051
1052 // linesize dependend scratch buffer allocation
1053 if (!s->edge_emu_buffer)
1054 if (s1->linesize) {
1055 if (frame_size_alloc(s, s1->linesize) < 0) {
1056 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1057 "scratch buffers.\n");
1058 return AVERROR(ENOMEM);
1059 }
1060 } else {
1061 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1062 "be allocated due to unknown size.\n");
1063 }
1064
1065 // MPEG2/interlacing info
1066 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1067 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1068
1069 if (!s1->first_field) {
1070 s->last_pict_type = s1->pict_type;
1071 if (s1->current_picture_ptr)
1072 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1073 }
1074
1075 return 0;
1076}
1077
1078/**
1079 * Set the given MpegEncContext to common defaults
1080 * (same for encoding and decoding).
1081 * The changed fields will not depend upon the
1082 * prior state of the MpegEncContext.
1083 */
1084void ff_mpv_common_defaults(MpegEncContext *s)
1085{
1086 s->y_dc_scale_table =
1087 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1088 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1089 s->progressive_frame = 1;
1090 s->progressive_sequence = 1;
1091 s->picture_structure = PICT_FRAME;
1092
1093 s->coded_picture_number = 0;
1094 s->picture_number = 0;
1095
1096 s->f_code = 1;
1097 s->b_code = 1;
1098
1099 s->slice_context_count = 1;
1100}
1101
1102/**
1103 * Set the given MpegEncContext to defaults for decoding.
1104 * the changed fields will not depend upon
1105 * the prior state of the MpegEncContext.
1106 */
1107void ff_mpv_decode_defaults(MpegEncContext *s)
1108{
1109 ff_mpv_common_defaults(s);
1110}
1111
1112void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
1113{
1114 s->avctx = avctx;
1115 s->width = avctx->coded_width;
1116 s->height = avctx->coded_height;
1117 s->codec_id = avctx->codec->id;
1118 s->workaround_bugs = avctx->workaround_bugs;
1119 s->flags = avctx->flags;
1120 s->flags2 = avctx->flags2;
1121
1122 /* convert fourcc to upper case */
1123 s->codec_tag = avpriv_toupper4(avctx->codec_tag);
1124
1125 s->stream_codec_tag = avpriv_toupper4(avctx->stream_codec_tag);
1126}
1127
1128static int init_er(MpegEncContext *s)
1129{
1130 ERContext *er = &s->er;
1131 int mb_array_size = s->mb_height * s->mb_stride;
1132 int i;
1133
1134 er->avctx = s->avctx;
1135 er->mecc = &s->mecc;
1136
1137 er->mb_index2xy = s->mb_index2xy;
1138 er->mb_num = s->mb_num;
1139 er->mb_width = s->mb_width;
1140 er->mb_height = s->mb_height;
1141 er->mb_stride = s->mb_stride;
1142 er->b8_stride = s->b8_stride;
1143
1144 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1145 er->error_status_table = av_mallocz(mb_array_size);
1146 if (!er->er_temp_buffer || !er->error_status_table)
1147 goto fail;
1148
1149 er->mbskip_table = s->mbskip_table;
1150 er->mbintra_table = s->mbintra_table;
1151
1152 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1153 er->dc_val[i] = s->dc_val[i];
1154
1155 er->decode_mb = mpeg_er_decode_mb;
1156 er->opaque = s;
1157
1158 return 0;
1159fail:
1160 av_freep(&er->er_temp_buffer);
1161 av_freep(&er->error_status_table);
1162 return AVERROR(ENOMEM);
1163}
1164
1165/**
1166 * Initialize and allocates MpegEncContext fields dependent on the resolution.
1167 */
1168static int init_context_frame(MpegEncContext *s)
1169{
1170 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1171
1172 s->mb_width = (s->width + 15) / 16;
1173 s->mb_stride = s->mb_width + 1;
1174 s->b8_stride = s->mb_width * 2 + 1;
1175 mb_array_size = s->mb_height * s->mb_stride;
1176 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1177
1178 /* set default edge pos, will be overridden
1179 * in decode_header if needed */
1180 s->h_edge_pos = s->mb_width * 16;
1181 s->v_edge_pos = s->mb_height * 16;
1182
1183 s->mb_num = s->mb_width * s->mb_height;
1184
1185 s->block_wrap[0] =
1186 s->block_wrap[1] =
1187 s->block_wrap[2] =
1188 s->block_wrap[3] = s->b8_stride;
1189 s->block_wrap[4] =
1190 s->block_wrap[5] = s->mb_stride;
1191
1192 y_size = s->b8_stride * (2 * s->mb_height + 1);
1193 c_size = s->mb_stride * (s->mb_height + 1);
1194 yc_size = y_size + 2 * c_size;
1195
1196 if (s->mb_height & 1)
1197 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1198
1199 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1200 for (y = 0; y < s->mb_height; y++)
1201 for (x = 0; x < s->mb_width; x++)
1202 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1203
1204 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1205
1206 if (s->encoding) {
1207 /* Allocate MV tables */
1208 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1209 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1210 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1211 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1212 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1213 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1214 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1215 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1216 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1217 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1218 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1219 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1220
1221 /* Allocate MB type table */
1222 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1223
1224 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1225
1226 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1227 mb_array_size * sizeof(float), fail);
1228 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1229 mb_array_size * sizeof(float), fail);
1230
1231 }
1232
1233 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1234 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1235 /* interlaced direct mode decoding tables */
1236 for (i = 0; i < 2; i++) {
1237 int j, k;
1238 for (j = 0; j < 2; j++) {
1239 for (k = 0; k < 2; k++) {
1240 FF_ALLOCZ_OR_GOTO(s->avctx,
1241 s->b_field_mv_table_base[i][j][k],
1242 mv_table_size * 2 * sizeof(int16_t),
1243 fail);
1244 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1245 s->mb_stride + 1;
1246 }
1247 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1248 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1249 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1250 }
1251 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1252 }
1253 }
1254 if (s->out_format == FMT_H263) {
1255 /* cbp values */
1256 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1257 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1258
1259 /* cbp, ac_pred, pred_dir */
1260 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1261 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1262 }
1263
1264 if (s->h263_pred || s->h263_plus || !s->encoding) {
1265 /* dc values */
1266 // MN: we need these for error resilience of intra-frames
1267 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1268 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1269 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1270 s->dc_val[2] = s->dc_val[1] + c_size;
1271 for (i = 0; i < yc_size; i++)
1272 s->dc_val_base[i] = 1024;
1273 }
1274
1275 /* which mb is a intra block */
1276 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1277 memset(s->mbintra_table, 1, mb_array_size);
1278
1279 /* init macroblock skip table */
1280 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1281 // Note the + 1 is for a quicker mpeg4 slice_end detection
1282
1283 return init_er(s);
1284fail:
1285 return AVERROR(ENOMEM);
1286}
1287
1288/**
1289 * init common structure for both encoder and decoder.
1290 * this assumes that some variables like width/height are already set
1291 */
1292av_cold int ff_mpv_common_init(MpegEncContext *s)
1293{
1294 int i;
1295 int nb_slices = (HAVE_THREADS &&
1296 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1297 s->avctx->thread_count : 1;
1298
1299 if (s->encoding && s->avctx->slices)
1300 nb_slices = s->avctx->slices;
1301
1302 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1303 s->mb_height = (s->height + 31) / 32 * 2;
1304 else
1305 s->mb_height = (s->height + 15) / 16;
1306
1307 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1308 av_log(s->avctx, AV_LOG_ERROR,
1309 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1310 return -1;
1311 }
1312
1313 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1314 int max_slices;
1315 if (s->mb_height)
1316 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1317 else
1318 max_slices = MAX_THREADS;
1319 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1320 " reducing to %d\n", nb_slices, max_slices);
1321 nb_slices = max_slices;
1322 }
1323
1324 if ((s->width || s->height) &&
1325 av_image_check_size(s->width, s->height, 0, s->avctx))
1326 return -1;
1327
1328 dct_init(s);
1329
1330 s->flags = s->avctx->flags;
1331 s->flags2 = s->avctx->flags2;
1332
1333 /* set chroma shifts */
1334 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1335 &s->chroma_x_shift,
1336 &s->chroma_y_shift);
1337
1338
1339 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1340 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1341 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1342 s->picture[i].f = av_frame_alloc();
1343 if (!s->picture[i].f)
1344 goto fail;
1345 }
1346 memset(&s->next_picture, 0, sizeof(s->next_picture));
1347 memset(&s->last_picture, 0, sizeof(s->last_picture));
1348 memset(&s->current_picture, 0, sizeof(s->current_picture));
1349 memset(&s->new_picture, 0, sizeof(s->new_picture));
1350 s->next_picture.f = av_frame_alloc();
1351 if (!s->next_picture.f)
1352 goto fail;
1353 s->last_picture.f = av_frame_alloc();
1354 if (!s->last_picture.f)
1355 goto fail;
1356 s->current_picture.f = av_frame_alloc();
1357 if (!s->current_picture.f)
1358 goto fail;
1359 s->new_picture.f = av_frame_alloc();
1360 if (!s->new_picture.f)
1361 goto fail;
1362
1363 if (init_context_frame(s))
1364 goto fail;
1365
1366 s->parse_context.state = -1;
1367
1368 s->context_initialized = 1;
1369 s->thread_context[0] = s;
1370
1371// if (s->width && s->height) {
1372 if (nb_slices > 1) {
1373 for (i = 1; i < nb_slices; i++) {
1374 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1375 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1376 }
1377
1378 for (i = 0; i < nb_slices; i++) {
1379 if (init_duplicate_context(s->thread_context[i]) < 0)
1380 goto fail;
1381 s->thread_context[i]->start_mb_y =
1382 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1383 s->thread_context[i]->end_mb_y =
1384 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1385 }
1386 } else {
1387 if (init_duplicate_context(s) < 0)
1388 goto fail;
1389 s->start_mb_y = 0;
1390 s->end_mb_y = s->mb_height;
1391 }
1392 s->slice_context_count = nb_slices;
1393// }
1394
1395 return 0;
1396 fail:
1397 ff_mpv_common_end(s);
1398 return -1;
1399}
1400
1401/**
1402 * Frees and resets MpegEncContext fields depending on the resolution.
1403 * Is used during resolution changes to avoid a full reinitialization of the
1404 * codec.
1405 */
1406static void free_context_frame(MpegEncContext *s)
1407{
1408 int i, j, k;
1409
1410 av_freep(&s->mb_type);
1411 av_freep(&s->p_mv_table_base);
1412 av_freep(&s->b_forw_mv_table_base);
1413 av_freep(&s->b_back_mv_table_base);
1414 av_freep(&s->b_bidir_forw_mv_table_base);
1415 av_freep(&s->b_bidir_back_mv_table_base);
1416 av_freep(&s->b_direct_mv_table_base);
1417 s->p_mv_table = NULL;
1418 s->b_forw_mv_table = NULL;
1419 s->b_back_mv_table = NULL;
1420 s->b_bidir_forw_mv_table = NULL;
1421 s->b_bidir_back_mv_table = NULL;
1422 s->b_direct_mv_table = NULL;
1423 for (i = 0; i < 2; i++) {
1424 for (j = 0; j < 2; j++) {
1425 for (k = 0; k < 2; k++) {
1426 av_freep(&s->b_field_mv_table_base[i][j][k]);
1427 s->b_field_mv_table[i][j][k] = NULL;
1428 }
1429 av_freep(&s->b_field_select_table[i][j]);
1430 av_freep(&s->p_field_mv_table_base[i][j]);
1431 s->p_field_mv_table[i][j] = NULL;
1432 }
1433 av_freep(&s->p_field_select_table[i]);
1434 }
1435
1436 av_freep(&s->dc_val_base);
1437 av_freep(&s->coded_block_base);
1438 av_freep(&s->mbintra_table);
1439 av_freep(&s->cbp_table);
1440 av_freep(&s->pred_dir_table);
1441
1442 av_freep(&s->mbskip_table);
1443
1444 av_freep(&s->er.error_status_table);
1445 av_freep(&s->er.er_temp_buffer);
1446 av_freep(&s->mb_index2xy);
1447 av_freep(&s->lambda_table);
1448
1449 av_freep(&s->cplx_tab);
1450 av_freep(&s->bits_tab);
1451
1452 s->linesize = s->uvlinesize = 0;
1453}
1454
1455int ff_mpv_common_frame_size_change(MpegEncContext *s)
1456{
1457 int i, err = 0;
1458
1459 if (!s->context_initialized)
1460 return AVERROR(EINVAL);
1461
1462 if (s->slice_context_count > 1) {
1463 for (i = 0; i < s->slice_context_count; i++) {
1464 free_duplicate_context(s->thread_context[i]);
1465 }
1466 for (i = 1; i < s->slice_context_count; i++) {
1467 av_freep(&s->thread_context[i]);
1468 }
1469 } else
1470 free_duplicate_context(s);
1471
1472 free_context_frame(s);
1473
1474 if (s->picture)
1475 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1476 s->picture[i].needs_realloc = 1;
1477 }
1478
1479 s->last_picture_ptr =
1480 s->next_picture_ptr =
1481 s->current_picture_ptr = NULL;
1482
1483 // init
1484 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1485 s->mb_height = (s->height + 31) / 32 * 2;
1486 else
1487 s->mb_height = (s->height + 15) / 16;
1488
1489 if ((s->width || s->height) &&
1490 (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1491 goto fail;
1492
1493 if ((err = init_context_frame(s)))
1494 goto fail;
1495
1496 s->thread_context[0] = s;
1497
1498 if (s->width && s->height) {
1499 int nb_slices = s->slice_context_count;
1500 if (nb_slices > 1) {
1501 for (i = 1; i < nb_slices; i++) {
1502 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1503 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1504 }
1505
1506 for (i = 0; i < nb_slices; i++) {
1507 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1508 goto fail;
1509 s->thread_context[i]->start_mb_y =
1510 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1511 s->thread_context[i]->end_mb_y =
1512 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1513 }
1514 } else {
1515 err = init_duplicate_context(s);
1516 if (err < 0)
1517 goto fail;
1518 s->start_mb_y = 0;
1519 s->end_mb_y = s->mb_height;
1520 }
1521 s->slice_context_count = nb_slices;
1522 }
1523
1524 return 0;
1525 fail:
1526 ff_mpv_common_end(s);
1527 return err;
1528}
1529
1530/* init common structure for both encoder and decoder */
1531void ff_mpv_common_end(MpegEncContext *s)
1532{
1533 int i;
1534
1535 if (s->slice_context_count > 1) {
1536 for (i = 0; i < s->slice_context_count; i++) {
1537 free_duplicate_context(s->thread_context[i]);
1538 }
1539 for (i = 1; i < s->slice_context_count; i++) {
1540 av_freep(&s->thread_context[i]);
1541 }
1542 s->slice_context_count = 1;
1543 } else free_duplicate_context(s);
1544
1545 av_freep(&s->parse_context.buffer);
1546 s->parse_context.buffer_size = 0;
1547
1548 av_freep(&s->bitstream_buffer);
1549 s->allocated_bitstream_buffer_size = 0;
1550
1551 if (s->picture) {
1552 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1553 ff_free_picture_tables(&s->picture[i]);
1554 ff_mpeg_unref_picture(s, &s->picture[i]);
1555 av_frame_free(&s->picture[i].f);
1556 }
1557 }
1558 av_freep(&s->picture);
1559 ff_free_picture_tables(&s->last_picture);
1560 ff_mpeg_unref_picture(s, &s->last_picture);
1561 av_frame_free(&s->last_picture.f);
1562 ff_free_picture_tables(&s->current_picture);
1563 ff_mpeg_unref_picture(s, &s->current_picture);
1564 av_frame_free(&s->current_picture.f);
1565 ff_free_picture_tables(&s->next_picture);
1566 ff_mpeg_unref_picture(s, &s->next_picture);
1567 av_frame_free(&s->next_picture.f);
1568 ff_free_picture_tables(&s->new_picture);
1569 ff_mpeg_unref_picture(s, &s->new_picture);
1570 av_frame_free(&s->new_picture.f);
1571
1572 free_context_frame(s);
1573
1574 s->context_initialized = 0;
1575 s->last_picture_ptr =
1576 s->next_picture_ptr =
1577 s->current_picture_ptr = NULL;
1578 s->linesize = s->uvlinesize = 0;
1579}
1580
/**
 * Initialize the derived lookup tables (max_level, max_run, index_run)
 * of an RLTable from its table_run/table_level/table_vlc data.
 *
 * @param rl           the run-length table to complete
 * @param static_store if non-NULL, backing storage for the three derived
 *                     tables, packed back to back per `last` value as
 *                     [max_level | max_run | index_run]; if NULL the
 *                     tables are heap-allocated
 */
av_cold void ff_init_rl(RLTable *rl,
                        uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        /* entries [0, rl->last) are "not last coefficient", the rest are */
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* rl->n doubles as the "unset" marker in index_run */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* point into static_store at the packed offsets, or allocate */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
1632
/**
 * Build the per-quantizer fast RL-VLC decode tables (rl->rl_vlc[q]) from
 * an initialized RLTable, pre-multiplying the level by the dequantization
 * factors for each of the 32 quantizer values.
 *
 * @param rl          the run-length table (table_vlc etc. must be set)
 * @param static_size allocated size of the static VLC table; must not
 *                    exceed the 1500-entry scratch table below
 */
av_cold void ff_init_vlc_rl(RLTable *rl, unsigned static_size)
{
    int i, q;
    VLC_TYPE table[1500][2] = {{0}};
    VLC vlc = { .table = table, .table_allocated = static_size };
    av_assert0(static_size <= FF_ARRAY_ELEMS(table));
    init_vlc(&vlc, 9, rl->n + 1, &rl->table_vlc[0][1], 4, 2, &rl->table_vlc[0][0], 4, 2, INIT_VLC_USE_NEW_STATIC);

    for (q = 0; q < 32; q++) {
        /* h263-style dequantization: level * 2q + (q odd-rounded),
         * except q == 0 which is a pass-through */
        int qmul = q * 2;
        int qadd = (q - 1) | 1;

        if (q == 0) {
            qmul = 1;
            qadd = 0;
        }
        for (i = 0; i < vlc.table_size; i++) {
            int code = vlc.table[i][0];
            int len = vlc.table[i][1];
            int level, run;

            if (len == 0) { // illegal code
                run = 66;
                level = MAX_LEVEL;
            } else if (len < 0) { // more bits needed
                run = 0;
                level = code;
            } else {
                if (code == rl->n) { // esc
                    run = 66;
                    level = 0;
                } else {
                    run = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    if (code >= rl->last) run += 192; // flag "last" codes
                }
            }
            rl->rl_vlc[q][i].len = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run = run;
        }
    }
}
1676
1677static void release_unused_pictures(MpegEncContext *s)
1678{
1679 int i;
1680
1681 /* release non reference frames */
1682 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1683 if (!s->picture[i].reference)
1684 ff_mpeg_unref_picture(s, &s->picture[i]);
1685 }
1686}
1687
1688static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1689{
1690 if (pic == s->last_picture_ptr)
1691 return 0;
1692 if (!pic->f->buf[0])
1693 return 1;
1694 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1695 return 1;
1696 return 0;
1697}
1698
1699static int find_unused_picture(MpegEncContext *s, int shared)
1700{
1701 int i;
1702
1703 if (shared) {
1704 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1705 if (!s->picture[i].f->buf[0] && &s->picture[i] != s->last_picture_ptr)
1706 return i;
1707 }
1708 } else {
1709 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1710 if (pic_is_unused(s, &s->picture[i]))
1711 return i;
1712 }
1713 }
1714
1715 av_log(s->avctx, AV_LOG_FATAL,
1716 "Internal error, picture buffer overflow\n");
1717 /* We could return -1, but the codec would crash trying to draw into a
1718 * non-existing frame anyway. This is safer than waiting for a random crash.
1719 * Also the return of this is never useful, an encoder must only allocate
1720 * as much as allowed in the specification. This has no relationship to how
1721 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1722 * enough for such valid streams).
1723 * Plus, a decoder has to check stream validity and remove frames if too
1724 * many reference frames are around. Waiting for "OOM" is not correct at
1725 * all. Similarly, missing reference frames have to be replaced by
1726 * interpolated/MC frames, anything else is a bug in the codec ...
1727 */
1728 abort();
1729 return -1;
1730}
1731
1732int ff_find_unused_picture(MpegEncContext *s, int shared)
1733{
1734 int ret = find_unused_picture(s, shared);
1735
1736 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1737 if (s->picture[ret].needs_realloc) {
1738 s->picture[ret].needs_realloc = 0;
1739 ff_free_picture_tables(&s->picture[ret]);
1740 ff_mpeg_unref_picture(s, &s->picture[ret]);
1741 }
1742 }
1743 return ret;
1744}
1745
1746static void gray_frame(AVFrame *frame)
1747{
1748 int i, h_chroma_shift, v_chroma_shift;
1749
1750 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1751
1752 for(i=0; i<frame->height; i++)
1753 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1754 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1755 memset(frame->data[1] + frame->linesize[1]*i,
1756 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1757 memset(frame->data[2] + frame->linesize[2]*i,
1758 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1759 }
1760}
1761
1762/**
1763 * generic function called after decoding
1764 * the header and before a frame is decoded.
1765 */
1766int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1767{
1768 int i, ret;
1769 Picture *pic;
1770 s->mb_skipped = 0;
1771
1772 if (!ff_thread_can_start_frame(avctx)) {
1773 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1774 return -1;
1775 }
1776
1777 /* mark & release old frames */
1778 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1779 s->last_picture_ptr != s->next_picture_ptr &&
1780 s->last_picture_ptr->f->buf[0]) {
1781 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1782 }
1783
1784 /* release forgotten pictures */
1785 /* if (mpeg124/h263) */
1786 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1787 if (&s->picture[i] != s->last_picture_ptr &&
1788 &s->picture[i] != s->next_picture_ptr &&
1789 s->picture[i].reference && !s->picture[i].needs_realloc) {
1790 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1791 av_log(avctx, AV_LOG_ERROR,
1792 "releasing zombie picture\n");
1793 ff_mpeg_unref_picture(s, &s->picture[i]);
1794 }
1795 }
1796
1797 ff_mpeg_unref_picture(s, &s->current_picture);
1798
1799 release_unused_pictures(s);
1800
1801 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1802 // we already have a unused image
1803 // (maybe it was set before reading the header)
1804 pic = s->current_picture_ptr;
1805 } else {
1806 i = ff_find_unused_picture(s, 0);
1807 if (i < 0) {
1808 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1809 return i;
1810 }
1811 pic = &s->picture[i];
1812 }
1813
1814 pic->reference = 0;
1815 if (!s->droppable) {
1816 if (s->pict_type != AV_PICTURE_TYPE_B)
1817 pic->reference = 3;
1818 }
1819
1820 pic->f->coded_picture_number = s->coded_picture_number++;
1821
1822 if (ff_alloc_picture(s, pic, 0) < 0)
1823 return -1;
1824
1825 s->current_picture_ptr = pic;
1826 // FIXME use only the vars from current_pic
1827 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1828 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1829 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1830 if (s->picture_structure != PICT_FRAME)
1831 s->current_picture_ptr->f->top_field_first =
1832 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1833 }
1834 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1835 !s->progressive_sequence;
1836 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1837
1838 s->current_picture_ptr->f->pict_type = s->pict_type;
1839 // if (s->flags && CODEC_FLAG_QSCALE)
1840 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1841 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1842
1843 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1844 s->current_picture_ptr)) < 0)
1845 return ret;
1846
1847 if (s->pict_type != AV_PICTURE_TYPE_B) {
1848 s->last_picture_ptr = s->next_picture_ptr;
1849 if (!s->droppable)
1850 s->next_picture_ptr = s->current_picture_ptr;
1851 }
1852 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1853 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1854 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1855 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1856 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1857 s->pict_type, s->droppable);
1858
1859 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1860 (s->pict_type != AV_PICTURE_TYPE_I ||
1861 s->picture_structure != PICT_FRAME)) {
1862 int h_chroma_shift, v_chroma_shift;
1863 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1864 &h_chroma_shift, &v_chroma_shift);
1865 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1866 av_log(avctx, AV_LOG_DEBUG,
1867 "allocating dummy last picture for B frame\n");
1868 else if (s->pict_type != AV_PICTURE_TYPE_I)
1869 av_log(avctx, AV_LOG_ERROR,
1870 "warning: first frame is no keyframe\n");
1871 else if (s->picture_structure != PICT_FRAME)
1872 av_log(avctx, AV_LOG_DEBUG,
1873 "allocate dummy last picture for field based first keyframe\n");
1874
1875 /* Allocate a dummy frame */
1876 i = ff_find_unused_picture(s, 0);
1877 if (i < 0) {
1878 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1879 return i;
1880 }
1881 s->last_picture_ptr = &s->picture[i];
1882
1883 s->last_picture_ptr->reference = 3;
1884 s->last_picture_ptr->f->key_frame = 0;
1885 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1886
1887 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1888 s->last_picture_ptr = NULL;
1889 return -1;
1890 }
1891
1892 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1893 for(i=0; i<avctx->height; i++)
1894 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1895 0x80, avctx->width);
1896 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1897 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1898 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1899 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1900 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1901 }
1902
1903 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1904 for(i=0; i<avctx->height; i++)
1905 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1906 }
1907 }
1908
1909 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1910 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1911 }
1912 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1913 s->pict_type == AV_PICTURE_TYPE_B) {
1914 /* Allocate a dummy frame */
1915 i = ff_find_unused_picture(s, 0);
1916 if (i < 0) {
1917 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1918 return i;
1919 }
1920 s->next_picture_ptr = &s->picture[i];
1921
1922 s->next_picture_ptr->reference = 3;
1923 s->next_picture_ptr->f->key_frame = 0;
1924 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1925
1926 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1927 s->next_picture_ptr = NULL;
1928 return -1;
1929 }
1930 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1931 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1932 }
1933
1934#if 0 // BUFREF-FIXME
1935 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1936 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
1937#endif
1938 if (s->last_picture_ptr) {
1939 ff_mpeg_unref_picture(s, &s->last_picture);
1940 if (s->last_picture_ptr->f->buf[0] &&
1941 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1942 s->last_picture_ptr)) < 0)
1943 return ret;
1944 }
1945 if (s->next_picture_ptr) {
1946 ff_mpeg_unref_picture(s, &s->next_picture);
1947 if (s->next_picture_ptr->f->buf[0] &&
1948 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1949 s->next_picture_ptr)) < 0)
1950 return ret;
1951 }
1952
1953 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1954 s->last_picture_ptr->f->buf[0]));
1955
1956 if (s->picture_structure!= PICT_FRAME) {
1957 int i;
1958 for (i = 0; i < 4; i++) {
1959 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1960 s->current_picture.f->data[i] +=
1961 s->current_picture.f->linesize[i];
1962 }
1963 s->current_picture.f->linesize[i] *= 2;
1964 s->last_picture.f->linesize[i] *= 2;
1965 s->next_picture.f->linesize[i] *= 2;
1966 }
1967 }
1968
1969 s->err_recognition = avctx->err_recognition;
1970
1971 /* set dequantizer, we can't do it during init as
1972 * it might change for mpeg4 and we can't do it in the header
1973 * decode as init is not called for mpeg4 there yet */
1974 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1975 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1976 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1977 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1978 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1979 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1980 } else {
1981 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1982 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1983 }
1984
1985 if (s->avctx->debug & FF_DEBUG_NOMC) {
1986 gray_frame(s->current_picture_ptr->f);
1987 }
1988
1989 return 0;
1990}
1991
/* called after a frame has been decoded. */
void ff_mpv_frame_end(MpegEncContext *s)
{
    /* leave the FPU/MMX state clean for code outside the decoder */
    emms_c();

    /* signal frame-threading consumers that this reference frame is done */
    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}
2000
2001
2002#if FF_API_VISMV
/**
 * Clip the segment (*sx,*sy)-(*ex,*ey) against the x range [0, maxx],
 * adjusting the y coordinates by linear interpolation.
 *
 * @return 1 if the segment lies entirely outside the range, 0 otherwise
 */
static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
{
    /* normalize so that the start is not to the right of the end */
    if (*sx > *ex)
        return clip_line(ex, ey, sx, sy, maxx);

    /* clip against the left border (x = 0) */
    if (*sx < 0) {
        if (*ex < 0)
            return 1;
        *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
        *sx = 0;
    }

    /* clip against the right border (x = maxx) */
    if (*ex > maxx) {
        if (*sx > maxx)
            return 1;
        *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
        *ex = maxx;
    }

    return 0;
}
2023
2024
2025/**
2026 * Draw a line from (ex, ey) -> (sx, sy).
2027 * @param w width of the image
2028 * @param h height of the image
2029 * @param stride stride/linesize of the image
2030 * @param color color of the arrow
2031 */
2032static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2033 int w, int h, int stride, int color)
2034{
2035 int x, y, fr, f;
2036
2037 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2038 return;
2039 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2040 return;
2041
2042 sx = av_clip(sx, 0, w - 1);
2043 sy = av_clip(sy, 0, h - 1);
2044 ex = av_clip(ex, 0, w - 1);
2045 ey = av_clip(ey, 0, h - 1);
2046
2047 buf[sy * stride + sx] += color;
2048
2049 if (FFABS(ex - sx) > FFABS(ey - sy)) {
2050 if (sx > ex) {
2051 FFSWAP(int, sx, ex);
2052 FFSWAP(int, sy, ey);
2053 }
2054 buf += sx + sy * stride;
2055 ex -= sx;
2056 f = ((ey - sy) << 16) / ex;
2057 for (x = 0; x <= ex; x++) {
2058 y = (x * f) >> 16;
2059 fr = (x * f) & 0xFFFF;
2060 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2061 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
2062 }
2063 } else {
2064 if (sy > ey) {
2065 FFSWAP(int, sx, ex);
2066 FFSWAP(int, sy, ey);
2067 }
2068 buf += sx + sy * stride;
2069 ey -= sy;
2070 if (ey)
2071 f = ((ex - sx) << 16) / ey;
2072 else
2073 f = 0;
2074 for(y= 0; y <= ey; y++){
2075 x = (y*f) >> 16;
2076 fr = (y*f) & 0xFFFF;
2077 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2078 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2079 }
2080 }
2081}
2082
2083/**
2084 * Draw an arrow from (ex, ey) -> (sx, sy).
2085 * @param w width of the image
2086 * @param h height of the image
2087 * @param stride stride/linesize of the image
2088 * @param color color of the arrow
2089 */
2090static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2091 int ey, int w, int h, int stride, int color, int tail, int direction)
2092{
2093 int dx,dy;
2094
2095 if (direction) {
2096 FFSWAP(int, sx, ex);
2097 FFSWAP(int, sy, ey);
2098 }
2099
2100 sx = av_clip(sx, -100, w + 100);
2101 sy = av_clip(sy, -100, h + 100);
2102 ex = av_clip(ex, -100, w + 100);
2103 ey = av_clip(ey, -100, h + 100);
2104
2105 dx = ex - sx;
2106 dy = ey - sy;
2107
2108 if (dx * dx + dy * dy > 3 * 3) {
2109 int rx = dx + dy;
2110 int ry = -dx + dy;
2111 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2112
2113 // FIXME subpixel accuracy
2114 rx = ROUNDED_DIV(rx * 3 << 4, length);
2115 ry = ROUNDED_DIV(ry * 3 << 4, length);
2116
2117 if (tail) {
2118 rx = -rx;
2119 ry = -ry;
2120 }
2121
2122 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2123 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2124 }
2125 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2126}
2127#endif
2128
2129static int add_mb(AVMotionVector *mb, uint32_t mb_type,
2130 int dst_x, int dst_y,
2131 int src_x, int src_y,
2132 int direction)
2133{
2134 if (dst_x == src_x && dst_y == src_y)
2135 return 0;
2136 mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
2137 mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
2138 mb->src_x = src_x;
2139 mb->src_y = src_y;
2140 mb->dst_x = dst_x;
2141 mb->dst_y = dst_y;
2142 mb->source = direction ? 1 : -1;
2143 mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
2144 return 1;
2145}
2146
/**
 * Print debugging info for the given picture.
 *
 * Three independent facilities, each gated by its own flag:
 *  - CODEC_FLAG2_EXPORT_MVS: attach an AV_FRAME_DATA_MOTION_VECTORS side
 *    data array to @p pict;
 *  - FF_DEBUG_SKIP/QP/MB_TYPE: log a per-MB ASCII map;
 *  - FF_DEBUG_VIS_* / debug_mv: paint vectors, QP and MB types directly
 *    into the picture's pixel data (modifies the frame!).
 */
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
                         uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
                         int *low_delay,
                         int mb_width, int mb_height, int mb_stride, int quarter_sample)
{
    if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
        /* shift converts the stored (half- or quarter-pel) vector to pixels */
        const int shift = 1 + quarter_sample;
        const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
        const int mv_stride      = (mb_width << mv_sample_log2) +
                                   (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
        int mb_x, mb_y, mbcount = 0;

        /* size is width * height * 2 * 4 where 2 is for directions and 4 is
         * for the maximum number of MB (4 MB in case of IS_8x8) */
        AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
        if (!mvs)
            return;

        /* Collect one entry per partition per prediction direction. */
        for (mb_y = 0; mb_y < mb_height; mb_y++) {
            for (mb_x = 0; mb_x < mb_width; mb_x++) {
                int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
                for (direction = 0; direction < 2; direction++) {
                    if (!USES_LIST(mb_type, direction))
                        continue;
                    if (IS_8X8(mb_type)) {
                        /* four 8x8 partitions, centers at (+4,+4) offsets */
                        for (i = 0; i < 4; i++) {
                            int sx = mb_x * 16 + 4 + 8 * (i & 1);
                            int sy = mb_y * 16 + 4 + 8 * (i >> 1);
                            int xy = (mb_x * 2 + (i & 1) +
                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                            int mx = (motion_val[direction][xy][0] >> shift) + sx;
                            int my = (motion_val[direction][xy][1] >> shift) + sy;
                            mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
                        }
                    } else if (IS_16X8(mb_type)) {
                        /* two 16x8 partitions stacked vertically */
                        for (i = 0; i < 2; i++) {
                            int sx = mb_x * 16 + 8;
                            int sy = mb_y * 16 + 4 + 8 * i;
                            int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
                            int mx = (motion_val[direction][xy][0] >> shift);
                            int my = (motion_val[direction][xy][1] >> shift);

                            if (IS_INTERLACED(mb_type))
                                my *= 2;

                            mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
                        }
                    } else if (IS_8X16(mb_type)) {
                        /* two 8x16 partitions side by side */
                        for (i = 0; i < 2; i++) {
                            int sx = mb_x * 16 + 4 + 8 * i;
                            int sy = mb_y * 16 + 8;
                            int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
                            int mx = motion_val[direction][xy][0] >> shift;
                            int my = motion_val[direction][xy][1] >> shift;

                            if (IS_INTERLACED(mb_type))
                                my *= 2;

                            mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
                        }
                    } else {
                        /* unpartitioned 16x16 macroblock */
                        int sx = mb_x * 16 + 8;
                        int sy = mb_y * 16 + 8;
                        int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
                        int mx = (motion_val[direction][xy][0]>>shift) + sx;
                        int my = (motion_val[direction][xy][1]>>shift) + sy;
                        mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
                    }
                }
            }
        }

        if (mbcount) {
            AVFrameSideData *sd;

            av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
            sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
            if (!sd)
                return;                /* NOTE(review): mvs leaks on this path */
            memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
        }

        av_freep(&mvs);
    }

    /* TODO: export all the following to make them accessible for users (and filters) */
    if (avctx->hwaccel || !mbtype_table
        || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
        return;


    /* Text dump: one character cell per macroblock. */
    if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        int x,y;

        av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
               av_get_picture_type_char(pict->pict_type));
        for (y = 0; y < mb_height; y++) {
            for (x = 0; x < mb_width; x++) {
                if (avctx->debug & FF_DEBUG_SKIP) {
                    /* consecutive-skip count, clamped to a single digit */
                    int count = mbskip_table[x + y * mb_stride];
                    if (count > 9)
                        count = 9;
                    av_log(avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if (avctx->debug & FF_DEBUG_QP) {
                    av_log(avctx, AV_LOG_DEBUG, "%2d",
                           qscale_table[x + y * mb_stride]);
                }
                if (avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = mbtype_table[x + y * mb_stride];
                    // Type & MV direction
                    if (IS_PCM(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(avctx, AV_LOG_DEBUG, "<");
                    else {
                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(avctx, AV_LOG_DEBUG, "X");
                    }

                    // segmentation
                    if (IS_8X8(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(avctx, AV_LOG_DEBUG, "?");


                    if (IS_INTERLACED(mb_type))
                        av_log(avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(avctx, AV_LOG_DEBUG, " ");
                }
            }
            av_log(avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* Visualisation: draw debug info into the picture itself. */
    if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        (avctx->debug_mv)) {
        int mb_y;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
#if FF_API_VISMV
        const int shift = 1 + quarter_sample;
        uint8_t *ptr;
        const int width          = avctx->width;
        const int height         = avctx->height;
#endif
        const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
        const int mv_stride      = (mb_width << mv_sample_log2) +
                                   (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);

        *low_delay = 0; // needed to see the vectors without trashing the buffers

        avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);

        av_frame_make_writable(pict);

        pict->opaque = NULL;
#if FF_API_VISMV
        ptr = pict->data[0];
#endif
        block_height = 16 >> v_chroma_shift;   /* chroma rows per MB */

        for (mb_y = 0; mb_y < mb_height; mb_y++) {
            int mb_x;
            for (mb_x = 0; mb_x < mb_width; mb_x++) {
                const int mb_index = mb_x + mb_y * mb_stride;
#if FF_API_VISMV
                if ((avctx->debug_mv) && motion_val[0]) {
                    int type;
                    /* type selects which vector class to draw:
                     * 0 = P forward, 1 = B forward, 2 = B backward */
                    for (type = 0; type < 3; type++) {
                        int direction = 0;
                        switch (type) {
                        case 0:
                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_P))
                                continue;
                            direction = 0;
                            break;
                        case 1:
                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_B))
                                continue;
                            direction = 0;
                            break;
                        case 2:
                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_B))
                                continue;
                            direction = 1;
                            break;
                        }
                        if (!USES_LIST(mbtype_table[mb_index], direction))
                            continue;

                        /* same partition geometry as the side-data export
                         * loop above */
                        if (IS_8X8(mbtype_table[mb_index])) {
                            int i;
                            for (i = 0; i < 4; i++) {
                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
                                int xy = (mb_x * 2 + (i & 1) +
                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (motion_val[direction][xy][0] >> shift) + sx;
                                int my = (motion_val[direction][xy][1] >> shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width,
                                           height, pict->linesize[0], 100, 0, direction);
                            }
                        } else if (IS_16X8(mbtype_table[mb_index])) {
                            int i;
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 8;
                                int sy = mb_y * 16 + 4 + 8 * i;
                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (motion_val[direction][xy][0] >> shift);
                                int my = (motion_val[direction][xy][1] >> shift);

                                if (IS_INTERLACED(mbtype_table[mb_index]))
                                    my *= 2;

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, pict->linesize[0], 100, 0, direction);
                            }
                        } else if (IS_8X16(mbtype_table[mb_index])) {
                            int i;
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 4 + 8 * i;
                                int sy = mb_y * 16 + 8;
                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
                                int mx = motion_val[direction][xy][0] >> shift;
                                int my = motion_val[direction][xy][1] >> shift;

                                if (IS_INTERLACED(mbtype_table[mb_index]))
                                    my *= 2;

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, pict->linesize[0], 100, 0, direction);
                            }
                        } else {
                            int sx= mb_x * 16 + 8;
                            int sy= mb_y * 16 + 8;
                            int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
                            int mx= (motion_val[direction][xy][0]>>shift) + sx;
                            int my= (motion_val[direction][xy][1]>>shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
                        }
                    }
                }
#endif
                if ((avctx->debug & FF_DEBUG_VIS_QP)) {
                    /* paint both chroma planes a gray level proportional
                     * to the MB's qscale */
                    uint64_t c = (qscale_table[mb_index] * 128 / 31) *
                                 0x0101010101010101ULL;
                    int y;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[1]) = c;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[2]) = c;
                    }
                }
                if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
                    motion_val[0]) {
                    int mb_type = mbtype_table[mb_index];
                    uint64_t u,v;
                    int y;
/* map a hue angle (degrees) and saturation radius to U/V values */
#define COLOR(theta, r) \
    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
    v = (int)(128 + r * sin(theta * 3.141592 / 180));


                    u = v = 128;   /* neutral chroma by default */
                    if (IS_PCM(mb_type)) {
                        COLOR(120, 48)
                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
                               IS_INTRA16x16(mb_type)) {
                        COLOR(30, 48)
                    } else if (IS_INTRA4x4(mb_type)) {
                        COLOR(90, 48)
                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
                        // COLOR(120, 48)
                    } else if (IS_DIRECT(mb_type)) {
                        COLOR(150, 48)
                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
                        COLOR(170, 48)
                    } else if (IS_GMC(mb_type)) {
                        COLOR(190, 48)
                    } else if (IS_SKIP(mb_type)) {
                        // COLOR(180, 48)
                    } else if (!USES_LIST(mb_type, 1)) {
                        COLOR(240, 48)
                    } else if (!USES_LIST(mb_type, 0)) {
                        COLOR(0, 48)
                    } else {
                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    /* replicate the 8-bit value across a whole 8-byte row */
                    u *= 0x0101010101010101ULL;
                    v *= 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[2]) = v;
                    }

                    // segmentation: XOR luma to mark partition boundaries
                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                    }
                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
                        for (y = 0; y < 16; y++)
                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
                                          pict->linesize[0]] ^= 0x80;
                    }
                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
                        int dm = 1 << (mv_sample_log2 - 2);
                        for (i = 0; i < 4; i++) {
                            int sx = mb_x * 16 + 8 * (i & 1);
                            int sy = mb_y * 16 + 8 * (i >> 1);
                            int xy = (mb_x * 2 + (i & 1) +
                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                            // FIXME bidir
                            int32_t *mv = (int32_t *) &motion_val[0][xy];
                            /* mark sub-8x8 splits where vectors differ */
                            if (mv[0] != mv[dm] ||
                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
                                for (y = 0; y < 8; y++)
                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
                                              pict->linesize[0]) ^= 0x8080808080808080ULL;
                        }
                    }

                    if (IS_INTERLACED(mb_type) &&
                        avctx->codec->id == AV_CODEC_ID_H264) {
                        // hmm
                    }
                }
                mbskip_table[mb_index] = 0;
            }
        }
    }
}
2523
2524void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2525{
2526 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2527 p->qscale_table, p->motion_val, &s->low_delay,
2528 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2529}
2530
2531int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2532{
2533 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2534 int offset = 2*s->mb_stride + 1;
2535 if(!ref)
2536 return AVERROR(ENOMEM);
2537 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2538 ref->size -= offset;
2539 ref->data += offset;
2540 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2541}
2542
2543static inline int hpel_motion_lowres(MpegEncContext *s,
2544 uint8_t *dest, uint8_t *src,
2545 int field_based, int field_select,
2546 int src_x, int src_y,
2547 int width, int height, ptrdiff_t stride,
2548 int h_edge_pos, int v_edge_pos,
2549 int w, int h, h264_chroma_mc_func *pix_op,
2550 int motion_x, int motion_y)
2551{
2552 const int lowres = s->avctx->lowres;
2553 const int op_index = FFMIN(lowres, 3);
2554 const int s_mask = (2 << lowres) - 1;
2555 int emu = 0;
2556 int sx, sy;
2557
2558 if (s->quarter_sample) {
2559 motion_x /= 2;
2560 motion_y /= 2;
2561 }
2562
2563 sx = motion_x & s_mask;
2564 sy = motion_y & s_mask;
2565 src_x += motion_x >> lowres + 1;
2566 src_y += motion_y >> lowres + 1;
2567
2568 src += src_y * stride + src_x;
2569
2570 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2571 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2572 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2573 s->linesize, s->linesize,
2574 w + 1, (h + 1) << field_based,
2575 src_x, src_y << field_based,
2576 h_edge_pos, v_edge_pos);
2577 src = s->edge_emu_buffer;
2578 emu = 1;
2579 }
2580
2581 sx = (sx << 2) >> lowres;
2582 sy = (sy << 2) >> lowres;
2583 if (field_select)
2584 src += s->linesize;
2585 pix_op[op_index](dest, src, stride, h, sx, sy);
2586 return emu;
2587}
2588
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
    const int block_s    = 8>>lowres;          /* block size at this lowres level */
    const int s_mask     = (2 << lowres) - 1;  /* sub-sample phase mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    /* split the vector into integer position and sub-sample phase;
     * note: '>> lowres + 1' parses as '>> (lowres + 1)' (intended). */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    /* derive the chroma source position per output format / subsampling */
    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            /* 4:2:0 chroma */
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
            //Chroma422
                mx      = motion_x / 2;
                uvsx    = mx & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
            //Chroma444
                uvsx    = motion_x & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* out-of-frame access: copy through the edge-emulation buffer */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            /* chroma goes into dedicated slices of the same buffer */
            uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based,uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y   += s->linesize;
        ptr_cb  += s->uvlinesize;
        ptr_cr  += s->uvlinesize;
    }

    /* rescale sub-sample phases into the range pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
2726
2727static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2728 uint8_t *dest_cb, uint8_t *dest_cr,
2729 uint8_t **ref_picture,
2730 h264_chroma_mc_func * pix_op,
2731 int mx, int my)
2732{
2733 const int lowres = s->avctx->lowres;
2734 const int op_index = FFMIN(lowres, 3);
2735 const int block_s = 8 >> lowres;
2736 const int s_mask = (2 << lowres) - 1;
2737 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2738 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2739 int emu = 0, src_x, src_y, sx, sy;
2740 ptrdiff_t offset;
2741 uint8_t *ptr;
2742
2743 if (s->quarter_sample) {
2744 mx /= 2;
2745 my /= 2;
2746 }
2747
2748 /* In case of 8X8, we construct a single chroma motion vector
2749 with a special rounding */
2750 mx = ff_h263_round_chroma(mx);
2751 my = ff_h263_round_chroma(my);
2752
2753 sx = mx & s_mask;
2754 sy = my & s_mask;
2755 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2756 src_y = s->mb_y * block_s + (my >> lowres + 1);
2757
2758 offset = src_y * s->uvlinesize + src_x;
2759 ptr = ref_picture[1] + offset;
2760 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2761 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2762 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2763 s->uvlinesize, s->uvlinesize,
2764 9, 9,
2765 src_x, src_y, h_edge_pos, v_edge_pos);
2766 ptr = s->edge_emu_buffer;
2767 emu = 1;
2768 }
2769 sx = (sx << 2) >> lowres;
2770 sy = (sy << 2) >> lowres;
2771 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2772
2773 ptr = ref_picture[2] + offset;
2774 if (emu) {
2775 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2776 s->uvlinesize, s->uvlinesize,
2777 9, 9,
2778 src_x, src_y, h_edge_pos, v_edge_pos);
2779 ptr = s->edge_emu_buffer;
2780 }
2781 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2782}
2783
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >>lowres;   /* block size at this lowres level */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma uses their (rounded) sum */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: one vector per field, interleaved output */
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: a same-parity reference in the second field
             * lives in the current picture, not the previous one */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, each covering a 16x8 half of the (field) MB */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: predictions of both parities are averaged */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
2925
2926/**
2927 * find the lowest MB row referenced in the MVs
2928 */
2929int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
2930{
2931 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2932 int my, off, i, mvs;
2933
2934 if (s->picture_structure != PICT_FRAME || s->mcsel)
2935 goto unhandled;
2936
2937 switch (s->mv_type) {
2938 case MV_TYPE_16X16:
2939 mvs = 1;
2940 break;
2941 case MV_TYPE_16X8:
2942 mvs = 2;
2943 break;
2944 case MV_TYPE_8X8:
2945 mvs = 4;
2946 break;
2947 default:
2948 goto unhandled;
2949 }
2950
2951 for (i = 0; i < mvs; i++) {
2952 my = s->mv[dir][i][1]<<qpel_shift;
2953 my_max = FFMAX(my_max, my);
2954 my_min = FFMIN(my_min, my);
2955 }
2956
2957 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2958
2959 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2960unhandled:
2961 return s->mb_height-1;
2962}
2963
/* put block[] to dest[]: dequantize the intra block, then overwrite the
 * destination with its inverse DCT (no read-modify-write). */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->idsp.idct_put(dest, line_size, block);
}
2971
2972/* add block[] to dest[] */
2973static inline void add_dct(MpegEncContext *s,
2974 int16_t *block, int i, uint8_t *dest, int line_size)
2975{
2976 if (s->block_last_index[i] >= 0) {
2977 s->idsp.idct_add(dest, line_size, block);
2978 }
2979}
2980
2981static inline void add_dequant_dct(MpegEncContext *s,
2982 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2983{
2984 if (s->block_last_index[i] >= 0) {
2985 s->dct_unquantize_inter(s, block, i, qscale);
2986
2987 s->idsp.idct_add(dest, line_size, block);
2988 }
2989}
2990
2991/**
2992 * Clean dc, ac, coded_block for the current non-intra MB.
2993 */
2994void ff_clean_intra_table_entries(MpegEncContext *s)
2995{
2996 int wrap = s->b8_stride;
2997 int xy = s->block_index[0];
2998
2999 s->dc_val[0][xy ] =
3000 s->dc_val[0][xy + 1 ] =
3001 s->dc_val[0][xy + wrap] =
3002 s->dc_val[0][xy + 1 + wrap] = 1024;
3003 /* ac pred */
3004 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
3005 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
3006 if (s->msmpeg4_version>=3) {
3007 s->coded_block[xy ] =
3008 s->coded_block[xy + 1 ] =
3009 s->coded_block[xy + wrap] =
3010 s->coded_block[xy + 1 + wrap] = 0;
3011 }
3012 /* chroma */
3013 wrap = s->mb_stride;
3014 xy = s->mb_x + s->mb_y * wrap;
3015 s->dc_val[1][xy] =
3016 s->dc_val[2][xy] = 1024;
3017 /* ac pred */
3018 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
3019 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
3020
3021 s->mbintra_table[xy]= 0;
3022}
3023
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)

   Reconstructs one macroblock into s->dest[] (or a scratchpad when the
   destination is not yet readable): motion compensation for inter MBs,
   then dequant+IDCT of the residual / intra coefficients. is_mpeg12 and
   lowres_flag are compile-time constants at each call site so dead
   branches are removed by the optimizer.
 */
static av_always_inline
void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    /* a hwaccel (XvMC) reconstructs the MB itself; nothing to do here */
    if (CONFIG_XVMC &&
        s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
        s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* print DCT coefficients */
       int i,j;
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                      block[i][s->idsp.idct_permutation[j]]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* skip the whole pixel reconstruction when encoding non-reference data
       with RD mb decision and no PSNR/frame-skip stats are needed */
    if (   (s->flags&CODEC_FLAG_PSNR)
        || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
        || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f->linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* interlaced DCT interleaves the two fields: double the stride,
           and the second field starts one line down instead of 8 lines down */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* B-frame output nobody reads yet: reconstruct into a
               scratchpad and copy out at skip_idct below */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* wait until the reference rows we motion-compensate
                       from have been decoded by the other frame thread */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
                                                 ff_mpv_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
                                                 ff_mpv_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
                        /* second (backward) prediction must average, not overwrite */
                        op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
                    }
                }else{
                    op_qpix = s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->hdsp.put_pixels_tab;
                    }else{
                        op_pix = s->hdsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                        op_pix = s->hdsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            /* codecs whose dequant happens during coefficient decoding
               (MPEG-1/2, msmpeg4, MPEG-4 w/o mpeg_quant) use add_dct below */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* MPEG-1/2 intra while decoding: coefficients are already
                   dequantized, only the IDCT remains */
                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->idsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
                            s->idsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
                            s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* reconstruction went to the scratchpad; copy it to the frame now */
        if(!readable){
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
3280
3281void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
3282{
3283#if !CONFIG_SMALL
3284 if(s->out_format == FMT_MPEG1) {
3285 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
3286 else mpv_decode_mb_internal(s, block, 0, 1);
3287 } else
3288#endif
3289 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
3290 else mpv_decode_mb_internal(s, block, 0, 0);
3291}
3292
3293void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3294{
3295 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3296 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3297 s->first_field, s->low_delay);
3298}
3299
/* Set up s->block_index[] (positions of this MB's 8x8 blocks inside the
   per-8x8 prediction tables) and s->dest[] (pixel pointers for the MB)
   for the current s->mb_x/s->mb_y. Must be called before decoding each
   MB row / after advancing mb_x. */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f->linesize[1];
    const int mb_size= 4 - s->avctx->lowres;  // log2 of luma MB width in pixels (16 >> lowres)

    /* indices of the four luma 8x8 blocks; the "-2"/"-1" bias means the
       indices point one MB column left, compensated by the caller
       advancing block_index when it steps mb_x */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma blocks live after the luma area (b8_stride*mb_height*2) in
       the shared prediction tables, one entry per MB */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    /* dest pointers start at MB column (mb_x - 1) for the same reason */
    s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    /* NOTE(review): for B frames with draw_horiz_band on frame pictures the
       row offset is skipped — presumably those go through the scratchpad
       path in mpv_decode_mb_internal(); confirm against callers */
    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
        s->dest[0] += s->mb_y *   linesize << mb_size;
        s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: mb_y counts both fields, halve it */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
3331
3332/**
3333 * Permute an 8x8 block.
3334 * @param block the block which will be permuted according to the given permutation vector
3335 * @param permutation the permutation vector
3336 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3337 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3338 * (inverse) permutated to scantable order!
3339 */
3340void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3341{
3342 int i;
3343 int16_t temp[64];
3344
3345 if(last<=0) return;
3346 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3347
3348 for(i=0; i<=last; i++){
3349 const int j= scantable[i];
3350 temp[j]= block[j];
3351 block[j]=0;
3352 }
3353
3354 for(i=0; i<=last; i++){
3355 const int j= scantable[i];
3356 const int perm_j= permutation[j];
3357 block[perm_j]= temp[j];
3358 }
3359}
3360
3361void ff_mpeg_flush(AVCodecContext *avctx){
3362 int i;
3363 MpegEncContext *s = avctx->priv_data;
3364
3365 if (!s || !s->picture)
3366 return;
3367
3368 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3369 ff_mpeg_unref_picture(s, &s->picture[i]);
3370 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3371
3372 ff_mpeg_unref_picture(s, &s->current_picture);
3373 ff_mpeg_unref_picture(s, &s->last_picture);
3374 ff_mpeg_unref_picture(s, &s->next_picture);
3375
3376 s->mb_x= s->mb_y= 0;
3377 s->closed_gop= 0;
3378
3379 s->parse_context.state= -1;
3380 s->parse_context.frame_start_found= 0;
3381 s->parse_context.overread= 0;
3382 s->parse_context.overread_index= 0;
3383 s->parse_context.index= 0;
3384 s->parse_context.last_index= 0;
3385 s->bitstream_buffer_size=0;
3386 s->pp_time=0;
3387}
3388
3389/**
3390 * set qscale and update qscale dependent variables.
3391 */
3392void ff_set_qscale(MpegEncContext * s, int qscale)
3393{
3394 if (qscale < 1)
3395 qscale = 1;
3396 else if (qscale > 31)
3397 qscale = 31;
3398
3399 s->qscale = qscale;
3400 s->chroma_qscale= s->chroma_qscale_table[qscale];
3401
3402 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3403 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
3404}
3405
3406void ff_mpv_report_decode_progress(MpegEncContext *s)
3407{
3408 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3409 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3410}