/*
 * VC-1 and WMV3 decoder
 * Copyright (c) 2011 Mashiat Sarker Shakkhar
 * Copyright (c) 2006-2007 Konstantin Shishkov
 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VC-1 and WMV3 block decoding routines
 */

#include "mathops.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "vc1.h"
#include "vc1_pred.h"
#include "vc1data.h"

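/*
 * Field-picture MV predictor scaling: a predictor candidate taken from the
 * other reference field is rescaled with per-REFDIST factors before use.
 * Judging from the lookups in this file (not a spec quote), rows 3..6 of the
 * ff_vc1_field_mvpred_scales / ff_vc1_b_field_mvpred_scales tables hold the
 * zone thresholds and offsets (x then y), while the first rows hold the
 * actual scale factors: candidates below the zone threshold are multiplied
 * by the first scale, larger ones by the second scale plus the zone offset,
 * and very large candidates are passed through unchanged.
 */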
static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
{
    int scaledvalue, refdist;
    int scalesame1, scalesame2;
    int scalezone1_x, zone1offset_x;
    int table_index = dir ^ v->second_field;

    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = v->refdist;
    else
        refdist = dir ? v->brfd : v->frfd;
    if (refdist > 3)
        refdist = 3;
    scalesame1    = ff_vc1_field_mvpred_scales[table_index][1][refdist];
    scalesame2    = ff_vc1_field_mvpred_scales[table_index][2][refdist];
    scalezone1_x  = ff_vc1_field_mvpred_scales[table_index][3][refdist];
    zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];

    if (FFABS(n) > 255)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_x)
            scaledvalue = (n * scalesame1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
            else
                scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
        }
    }
    return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
}

static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
{
    int scaledvalue, refdist;
    int scalesame1, scalesame2;
    int scalezone1_y, zone1offset_y;
    int table_index = dir ^ v->second_field;

    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = v->refdist;
    else
        refdist = dir ? v->brfd : v->frfd;
    if (refdist > 3)
        refdist = 3;
    scalesame1    = ff_vc1_field_mvpred_scales[table_index][1][refdist];
    scalesame2    = ff_vc1_field_mvpred_scales[table_index][2][refdist];
    scalezone1_y  = ff_vc1_field_mvpred_scales[table_index][4][refdist];
    zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];

    if (FFABS(n) > 63)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_y)
            scaledvalue = (n * scalesame1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
            else
                scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
        }
    }

    if (v->cur_field_type && !v->ref_field_type[dir])
        return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
    else
        return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
}

static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
{
    int scalezone1_x, zone1offset_x;
    int scaleopp1, scaleopp2, brfd;
    int scaledvalue;

    brfd = FFMIN(v->brfd, 3);
    scalezone1_x  = ff_vc1_b_field_mvpred_scales[3][brfd];
    zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
    scaleopp1     = ff_vc1_b_field_mvpred_scales[1][brfd];
    scaleopp2     = ff_vc1_b_field_mvpred_scales[2][brfd];

    if (FFABS(n) > 255)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_x)
            scaledvalue = (n * scaleopp1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
            else
                scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
        }
    }
    return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
}

static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
{
    int scalezone1_y, zone1offset_y;
    int scaleopp1, scaleopp2, brfd;
    int scaledvalue;

    brfd = FFMIN(v->brfd, 3);
    scalezone1_y  = ff_vc1_b_field_mvpred_scales[4][brfd];
    zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
    scaleopp1     = ff_vc1_b_field_mvpred_scales[1][brfd];
    scaleopp2     = ff_vc1_b_field_mvpred_scales[2][brfd];

    if (FFABS(n) > 63)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_y)
            scaledvalue = (n * scaleopp1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
            else
                scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
        }
    }
    if (v->cur_field_type && !v->ref_field_type[dir]) {
        return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
    } else {
        return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
    }
}

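/*
 * scaleforsame()/scaleforopp() below dispatch to the *_x/*_y helpers or
 * apply a single table scale directly, depending on picture type, field and
 * prediction direction.  hpel is 1 for half-pel sequences: the predictor is
 * shifted down to the precision the scale tables appear to expect and
 * shifted back up afterwards.
 */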
static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
                                         int dim, int dir)
{
    int brfd, scalesame;
    int hpel = 1 - v->s.quarter_sample;

    n >>= hpel;
    if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
        if (dim)
            n = scaleforsame_y(v, i, n, dir) << hpel;
        else
            n = scaleforsame_x(v, n, dir) << hpel;
        return n;
    }
    brfd      = FFMIN(v->brfd, 3);
    scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];

    n = (n * scalesame >> 8) << hpel;
    return n;
}

static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
                                        int dim, int dir)
{
    int refdist, scaleopp;
    int hpel = 1 - v->s.quarter_sample;

    n >>= hpel;
    if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
        if (dim)
            n = scaleforopp_y(v, n, dir) << hpel;
        else
            n = scaleforopp_x(v, n) << hpel;
        return n;
    }
    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = FFMIN(v->refdist, 3);
    else
        refdist = dir ? v->brfd : v->frfd;
    scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];

    n = (n * scaleopp >> 8) << hpel;
    return n;
}

/** Predict and set motion vector
 */
void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
                    int mv1, int r_x, int r_y, uint8_t* is_intra,
                    int pred_flag, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
    int opposite, a_f, b_f, c_f;
    int16_t field_predA[2];
    int16_t field_predB[2];
    int16_t field_predC[2];
    int a_valid, b_valid, c_valid;
    int hybridmv_thresh, y_bias = 0;

    if (v->mv_mode == MV_PMODE_MIXED_MV ||
        ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
        mixedmv_pic = 1;
    else
        mixedmv_pic = 0;
    /* scale MV difference to be quad-pel */
    dmv_x <<= 1 - s->quarter_sample;
    dmv_y <<= 1 - s->quarter_sample;
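    /* quarter_sample == 0 means the decoded differentials are in half-pel
     * units, so the shift above doubles them; in quarter-pel mode it is a
     * no-op (e.g. a half-pel dmv_x of 3 becomes 6 quarter-pel units) */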

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
        s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
        s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
        if (mv1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
            s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
            s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
            s->current_picture.motion_val[1][xy + wrap][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
        }
        return;
    }

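    /* predictor geometry: C is taken from the block to the left, A from the
     * block above, and B from a block above shifted by off (chosen below) */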
    C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
    A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
    if (mv1) {
        if (v->field_mode && mixedmv_pic)
            off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        else
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
    } else {
        // in 4-MV mode different blocks have different B predictor positions
        switch (n) {
        case 0:
            off = (s->mb_x > 0) ? -1 : 1;
            break;
        case 1:
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
            break;
        case 2:
            off = 1;
            break;
        case 3:
            off = -1;
        }
    }
    B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];

    a_valid = !s->first_slice_line || (n == 2 || n == 3);
    b_valid = a_valid && (s->mb_width > 1);
    c_valid = s->mb_x || (n == 1 || n == 3);
    if (v->field_mode) {
        a_valid = a_valid && !is_intra[xy - wrap];
        b_valid = b_valid && !is_intra[xy - wrap + off];
        c_valid = c_valid && !is_intra[xy - 1];
    }

    if (a_valid) {
        a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
        num_oppfield += a_f;
        num_samefield += 1 - a_f;
        field_predA[0] = A[0];
        field_predA[1] = A[1];
    } else {
        field_predA[0] = field_predA[1] = 0;
        a_f = 0;
    }
    if (b_valid) {
        b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
        num_oppfield += b_f;
        num_samefield += 1 - b_f;
        field_predB[0] = B[0];
        field_predB[1] = B[1];
    } else {
        field_predB[0] = field_predB[1] = 0;
        b_f = 0;
    }
    if (c_valid) {
        c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
        num_oppfield += c_f;
        num_samefield += 1 - c_f;
        field_predC[0] = C[0];
        field_predC[1] = C[1];
    } else {
        field_predC[0] = field_predC[1] = 0;
        c_f = 0;
    }
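    /* num_samefield/num_oppfield now count how many of the valid A/B/C
     * candidates used the current vs. the opposite reference field; the
     * counts feed the same/opposite-field decision that follows */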

    if (v->field_mode) {
        if (!v->numref)
            // REFFIELD determines if the last field or the second-last field is
            // to be used as reference
            opposite = 1 - v->reffield;
        else {
            if (num_samefield <= num_oppfield)
                opposite = 1 - pred_flag;
            else
                opposite = pred_flag;
        }
    } else
        opposite = 0;
    if (opposite) {
        if (a_valid && !a_f) {
            field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
            field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
        }
        if (b_valid && !b_f) {
            field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
            field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
        }
        if (c_valid && !c_f) {
            field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
            field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
        }
        v->mv_f[dir][xy + v->blocks_off] = 1;
        v->ref_field_type[dir] = !v->cur_field_type;
    } else {
        if (a_valid && a_f) {
            field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
            field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
        }
        if (b_valid && b_f) {
            field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
            field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
        }
        if (c_valid && c_f) {
            field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
            field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
        }
        v->mv_f[dir][xy + v->blocks_off] = 0;
        v->ref_field_type[dir] = v->cur_field_type;
    }

    if (a_valid) {
        px = field_predA[0];
        py = field_predA[1];
    } else if (c_valid) {
        px = field_predC[0];
        py = field_predC[1];
    } else if (b_valid) {
        px = field_predB[0];
        py = field_predB[1];
    } else {
        px = 0;
        py = 0;
    }

    if (num_samefield + num_oppfield > 1) {
        px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
        py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
    }

    /* Pullback MV as specified in 8.3.5.3.4 */
    if (!v->field_mode) {
        int qx, qy, X, Y;
        qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
        qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
        X = (s->mb_width << 6) - 4;
        Y = (s->mb_height << 6) - 4;
        if (mv1) {
            if (qx + px < -60) px = -60 - qx;
            if (qy + py < -60) py = -60 - qy;
        } else {
            if (qx + px < -28) px = -28 - qx;
            if (qy + py < -28) py = -28 - qy;
        }
        if (qx + px > X) px = X - qx;
        if (qy + py > Y) py = Y - qy;
    }
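    /* the pullback above works in quarter-pel units: qx/qy is the block
     * position (64 units per 16-pel macroblock) and X/Y the picture extent
     * minus a small margin, so e.g. for the first MB a 1-MV predictor is
     * limited to -60, i.e. 15 luma pixels left/above the frame */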

    if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
        hybridmv_thresh = 32;
        if (a_valid && c_valid) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
            if (sum > hybridmv_thresh) {
                if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
                    px = field_predA[0];
                    py = field_predA[1];
                } else {
                    px = field_predC[0];
                    py = field_predC[1];
                }
            } else {
                if (is_intra[xy - 1])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
                if (sum > hybridmv_thresh) {
                    if (get_bits1(&s->gb)) {
                        px = field_predA[0];
                        py = field_predA[1];
                    } else {
                        px = field_predC[0];
                        py = field_predC[1];
                    }
                }
            }
        }
    }

    if (v->field_mode && v->numref)
        r_y >>= 1;
    if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
        y_bias = 1;
    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
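    /* the "& ((r_x << 1) - 1)" above is a reduction modulo 2*r_x (valid
     * because the MV ranges are powers of two), re-centred into
     * [-r_x, r_x - 1]; e.g. with r_x = 256, px + dmv_x = 300 wraps to
     * 300 - 512 = -212 */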
    if (mv1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
        s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
        s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
        v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
        v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
    }
}

/** Predict and set motion vector for interlaced frame picture MBs
 */
void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                          int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px = 0, py = 0;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        s->current_picture.motion_val[1][xy][0] = 0;
        s->current_picture.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0] = 0;
            s->current_picture.motion_val[0][xy + 1][1] = 0;
            s->current_picture.motion_val[0][xy + wrap][0] = 0;
            s->current_picture.motion_val[0][xy + wrap][1] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1][0] = 0;
            s->current_picture.motion_val[1][xy + 1][1] = 0;
            s->current_picture.motion_val[1][xy + wrap][0] = 0;
            s->current_picture.motion_val[1][xy + wrap][1] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    off = ((n == 0) || (n == 1)) ? 1 : -1;
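    /* off points at the field partner of the left neighbour: one block row
     * below for the top pair of blocks (n == 0, 1), one row above for the
     * bottom pair; it is only used when that neighbour carries field MVs
     * that have to be averaged for a frame-MV current block */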
    /* predict A */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.motion_val[dir][xy - 1][0];
            A[1] = s->current_picture.motion_val[dir][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj = n | 2;
                pos_b = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj = 2;
                    pos_c = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    if (s->mb_x == s->mb_width - 1) {
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj = 3;
                            pos_c = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        pos_b = s->block_index[1];
        b_valid = 1;
        B[0] = s->current_picture.motion_val[dir][pos_b][0];
        B[1] = s->current_picture.motion_val[dir][pos_b][1];
        pos_c = s->block_index[0];
        c_valid = 1;
        C[0] = s->current_picture.motion_val[dir][pos_c][0];
        C[1] = s->current_picture.motion_val[dir][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                else if (b_valid) { px = B[0]; py = B[1]; }
                else { px = C[0]; py = C[1]; }
            }
        }
    } else {
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from the same-field set depending on priority;
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else /*if (c_valid)*/ {
                    av_assert1(c_valid);
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else /*if (field_b && b_valid)*/ {
                    av_assert1(field_b && b_valid);
                    px = B[0];
                    py = B[1];
                }
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        }
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
    }
}

void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                      int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int r_x, r_y;
    const uint8_t *is_intra = v->mb_type[0];

    av_assert0(!v->field_mode);

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    dmv_x[0] <<= 1 - s->quarter_sample;
    dmv_y[0] <<= 1 - s->quarter_sample;
    dmv_x[1] <<= 1 - s->quarter_sample;
    dmv_y[1] <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    if (s->mb_intra) {
        s->current_picture.motion_val[0][xy][0] =
        s->current_picture.motion_val[0][xy][1] =
        s->current_picture.motion_val[1][xy][0] =
        s->current_picture.motion_val[1][xy][1] = 0;
        return;
    }
    if (direct && s->next_picture_ptr->field_picture)
        av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");

    s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
    s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
    s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
    s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
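    /* direct mode seed: the co-located MV of the next anchor picture is
     * scaled by BFRACTION to give the forward (s->mv[0]) and backward
     * (s->mv[1]) predictions used below */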

    /* Pullback predicted motion vectors as specified in 8.4.5.4 */
    s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
    s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
    s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    if (direct) {
        s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
        s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
        s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
        s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
        return;
    }

    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C = s->current_picture.motion_val[0][xy - 2];
        A = s->current_picture.motion_val[0][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[0][xy - wrap * 2 + off];

        if (!s->mb_x) C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C = s->current_picture.motion_val[1][xy - 2];
        A = s->current_picture.motion_val[1][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[1][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */

        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}

void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y,
                            int mv1, int *pred_flag)
{
    int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
    MpegEncContext *s = &v->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if (v->bmvtype == BMV_TYPE_DIRECT) {
        int total_opp, k, f;
        if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
            s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 1, s->quarter_sample);

            total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
            f = (total_opp > 2) ? 1 : 0;
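            /* f is effectively a majority vote over the four co-located
             * blocks: if more than two of them referenced the opposite
             * field, the direct MVs are treated as opposite-field too */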
        } else {
            s->mv[0][0][0] = s->mv[0][0][1] = 0;
            s->mv[1][0][0] = s->mv[1][0][1] = 0;
            f = 0;
        }
        v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
        for (k = 0; k < 4; k++) {
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
            v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
        }
        return;
    }
    if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
        ff_vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        ff_vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        return;
    }
    if (dir) { // backward
        ff_vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        if (n == 3 || mv1) {
            ff_vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
        }
    } else { // forward
        ff_vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        if (n == 3 || mv1) {
            ff_vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
        }
    }
}