ffmpeg/libavcodec/h263.c
/*
 * H263/MPEG4 backend for encoder and decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * H263+ support.
 * Copyright (c) 2001 Juan J. Sierralta P
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * h263/mpeg4 codec.
 */

#include <limits.h>

#include "avcodec.h"
#include "mpegvideo.h"
#include "h263.h"
#include "h263data.h"
#include "mathops.h"
#include "mpegutils.h"
#include "unary.h"
#include "flv.h"
#include "mpeg4video.h"

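/* Static backing store for the shared H.263 run-level tables, filled once at init time. */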
uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];

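/**
 * Propagate the final motion vectors, field references and skip/MB-type
 * information of the current macroblock into the current_picture tables
 * (8x8 vectors are already stored during parsing).
 */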
void ff_h263_update_motion_val(MpegEncContext * s){
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    //FIXME a lot of that is only needed for !low_delay
    const int wrap = s->b8_stride;
    const int xy = s->block_index[0];

    s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;

    if(s->mv_type != MV_TYPE_8X8){
        int motion_x, motion_y;
        if (s->mb_intra) {
            motion_x = 0;
            motion_y = 0;
        } else if (s->mv_type == MV_TYPE_16X16) {
            motion_x = s->mv[0][0][0];
            motion_y = s->mv[0][0][1];
        } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
            int i;
            motion_x = s->mv[0][0][0] + s->mv[0][1][0];
            motion_y = s->mv[0][0][1] + s->mv[0][1][1];
            motion_x = (motion_x>>1) | (motion_x&1);
            for(i=0; i<2; i++){
                s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
            }
            s->current_picture.ref_index[0][4*mb_xy    ] =
            s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
            s->current_picture.ref_index[0][4*mb_xy + 2] =
            s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
        }

        /* no update if 8X8 because it has been done during parsing */
        s->current_picture.motion_val[0][xy][0]            = motion_x;
        s->current_picture.motion_val[0][xy][1]            = motion_y;
        s->current_picture.motion_val[0][xy + 1][0]        = motion_x;
        s->current_picture.motion_val[0][xy + 1][1]        = motion_y;
        s->current_picture.motion_val[0][xy + wrap][0]     = motion_x;
        s->current_picture.motion_val[0][xy + wrap][1]     = motion_y;
        s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
        s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
    }

    if(s->encoding){ //FIXME encoding MUST be cleaned up
        if (s->mv_type == MV_TYPE_8X8)
            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
        else if(s->mb_intra)
            s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
        else
            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
    }
}

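/**
 * Predict the DC coefficient of block n from its left (A) and top (C)
 * neighbours, ignoring neighbours that lie outside the current GOB/slice.
 * @param dc_val_ptr receives a pointer to this block's dc_val entry
 * @return the predicted DC value
 */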
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
{
    int x, y, wrap, a, c, pred_dc;
    int16_t *dc_val;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + ((n & 2) >> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
    }
    /* B C
     * A X
     */
    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }
    /* just DC prediction */
    if (a != 1024 && c != 1024)
        pred_dc = (a + c) >> 1;
    else if (a != 1024)
        pred_dc = a;
    else
        pred_dc = c;

    /* we assume pred is positive */
    *dc_val_ptr = &dc_val[x + y * wrap];
    return pred_dc;
}

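/**
 * Apply the H.263 in-loop deblocking filter to the edges of the current
 * macroblock and of its top, left and top-left neighbours, skipping edges
 * that belong to skipped macroblocks.
 */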
void ff_h263_loop_filter(MpegEncContext * s){
    int qp_c;
    const int linesize  = s->linesize;
    const int uvlinesize= s->uvlinesize;
    const int xy = s->mb_y * s->mb_stride + s->mb_x;
    uint8_t *dest_y = s->dest[0];
    uint8_t *dest_cb= s->dest[1];
    uint8_t *dest_cr= s->dest[2];

//    if(s->pict_type==AV_PICTURE_TYPE_B && !s->readable) return;

    /*
       Diag  Top
       Left  Center
    */
    if (!IS_SKIP(s->current_picture.mb_type[xy])) {
        qp_c= s->qscale;
        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize,     linesize, qp_c);
        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }else
        qp_c= 0;

    if(s->mb_y){
        int qp_dt, qp_tt, qp_tc;

        if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
            qp_tt=0;
        else
            qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];

        if(qp_c)
            qp_tc= qp_c;
        else
            qp_tc= qp_tt;

        if(qp_tc){
            const int chroma_qp= s->chroma_qscale_table[qp_tc];
            s->h263dsp.h263_v_loop_filter(dest_y,     linesize, qp_tc);
            s->h263dsp.h263_v_loop_filter(dest_y + 8, linesize, qp_tc);

            s->h263dsp.h263_v_loop_filter(dest_cb, uvlinesize, chroma_qp);
            s->h263dsp.h263_v_loop_filter(dest_cr, uvlinesize, chroma_qp);
        }

        if(qp_tt)
            s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize + 8, linesize, qp_tt);

        if(s->mb_x){
            if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
                qp_dt= qp_tt;
            else
                qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];

            if(qp_dt){
                const int chroma_qp= s->chroma_qscale_table[qp_dt];
                s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize, linesize, qp_dt);
                s->h263dsp.h263_h_loop_filter(dest_cb - 8 * uvlinesize, uvlinesize, chroma_qp);
                s->h263dsp.h263_h_loop_filter(dest_cr - 8 * uvlinesize, uvlinesize, chroma_qp);
            }
        }
    }

    if(qp_c){
        s->h263dsp.h263_h_loop_filter(dest_y + 8, linesize, qp_c);
        if(s->mb_y + 1 == s->mb_height)
            s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }

    if(s->mb_x){
        int qp_lc;
        if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
            qp_lc= qp_c;
        else
            qp_lc = s->current_picture.qscale_table[xy - 1];

        if(qp_lc){
            s->h263dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
            if(s->mb_y + 1 == s->mb_height){
                const int chroma_qp= s->chroma_qscale_table[qp_lc];
                s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize, linesize, qp_lc);
                s->h263dsp.h263_h_loop_filter(dest_cb, uvlinesize, chroma_qp);
                s->h263dsp.h263_h_loop_filter(dest_cr, uvlinesize, chroma_qp);
            }
        }
    }
}

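/**
 * Combined AC/DC prediction for advanced intra coding: predict the DC of
 * block n from the left (A) or top (C) neighbour and, when AC prediction
 * is enabled, add the neighbour's first row or column of AC coefficients,
 * then update the dc_val/ac_val prediction tables.
 */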
void ff_h263_pred_acdc(MpegEncContext * s, int16_t *block, int n)
{
    int x, y, wrap, a, c, pred_dc, scale, i;
    int16_t *dc_val, *ac_val, *ac_val1;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + (n>> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
        ac_val = s->ac_val[0][0];
        scale = s->y_dc_scale;
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
        ac_val = s->ac_val[n - 4 + 1][0];
        scale = s->c_dc_scale;
    }

    ac_val += ((y) * wrap + (x)) * 16;
    ac_val1 = ac_val;

    /* B C
     * A X
     */
    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }

    if (s->ac_pred) {
        pred_dc = 1024;
        if (s->h263_aic_dir) {
            /* left prediction */
            if (a != 1024) {
                ac_val -= 16;
                for(i=1;i<8;i++) {
                    block[s->idsp.idct_permutation[i << 3]] += ac_val[i];
                }
                pred_dc = a;
            }
        } else {
            /* top prediction */
            if (c != 1024) {
                ac_val -= 16 * wrap;
                for(i=1;i<8;i++) {
                    block[s->idsp.idct_permutation[i]] += ac_val[i + 8];
                }
                pred_dc = c;
            }
        }
    } else {
        /* just DC prediction */
        if (a != 1024 && c != 1024)
            pred_dc = (a + c) >> 1;
        else if (a != 1024)
            pred_dc = a;
        else
            pred_dc = c;
    }

    /* we assume pred is positive */
    block[0]=block[0]*scale + pred_dc;

    if (block[0] < 0)
        block[0] = 0;
    else
        block[0] |= 1;

    /* Update AC/DC tables */
    dc_val[(x) + (y) * wrap] = block[0];

    /* left copy */
    for(i=1;i<8;i++)
        ac_val1[i    ] = block[s->idsp.idct_permutation[i << 3]];
    /* top copy */
    for(i=1;i<8;i++)
        ac_val1[8 + i] = block[s->idsp.idct_permutation[i]];
}

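/**
 * Compute the median motion-vector predictor for the given 8x8 block from
 * its neighbouring candidate blocks (left, above and above-right/-left),
 * handling the first slice line and resync marker boundaries specially.
 * @return a pointer to the current block's motion_val entry
 */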
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
                             int *px, int *py)
{
    int wrap;
    int16_t *A, *B, *C, (*mot_val)[2];
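    /* horizontal offset, within the row above, of the third candidate predictor C for each 8x8 block */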
    static const int off[4]= {2, 1, 1, -1};

    wrap = s->b8_stride;
    mot_val = s->current_picture.motion_val[dir] + s->block_index[block];

    A = mot_val[ - 1];
    /* special case for first (slice) line */
    if (s->first_slice_line && block<3) {
        // we can't just change some MVs to simulate that, as we need them for the B-frames (and ME)
        // and if we ever support non-rectangular objects then we need to do a few ifs here anyway :(
        if(block==0){ //most common case
            if(s->mb_x == s->resync_mb_x){ //rare
                *px= *py = 0;
            }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                if(s->mb_x==0){
                    *px = C[0];
                    *py = C[1];
                }else{
                    *px = mid_pred(A[0], 0, C[0]);
                    *py = mid_pred(A[1], 0, C[1]);
                }
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else if(block==1){
            if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                *px = mid_pred(A[0], 0, C[0]);
                *py = mid_pred(A[1], 0, C[1]);
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else{ /* block==2 */
            B = mot_val[ - wrap];
            C = mot_val[off[block] - wrap];
            if(s->mb_x == s->resync_mb_x) //rare
                A[0]=A[1]=0;

            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    } else {
        B = mot_val[ - wrap];
        C = mot_val[off[block] - wrap];
        *px = mid_pred(A[0], B[0], C[0]);
        *py = mid_pred(A[1], B[1], C[1]);
    }
    return *mot_val;
}


/**
 * Get the GOB height, in macroblock rows, based on the picture height.
 */
int ff_h263_get_gob_height(MpegEncContext *s){
    if (s->height <= 400)
        return 1;
    else if (s->height <= 800)
        return 2;
    else
        return 4;
}