/*
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/internal.h"
#include "avcodec.h"
#include "h261.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include <limits.h>

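/*
 * One-point global motion compensation (MPEG-4 GMC with a single warp
 * point). The macroblock source position comes from sprite_offset scaled
 * down by sprite_warping_accuracy; the remaining fraction is normalized to
 * 1/16-pel precision and handed to mdsp.gmc1(), except when both fractions
 * reduce to half-pel phases, in which case a plain (no-rounding-aware)
 * half-pel copy is used instead. The chroma planes are handled similarly at
 * half resolution (always through gmc1()) and are skipped when
 * CODEC_FLAG_GRAY is set.
 */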
static void gmc1_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t **ref_picture)
{
    uint8_t *ptr;
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;
    int emu = 0;

    motion_x = s->sprite_offset[0][0];
    motion_y = s->sprite_offset[0][1];
    src_x    = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y    = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y = 0;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 linesize, linesize,
                                 17, 17,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->edge_emu_buffer;
    }

    if ((motion_x | motion_y) & 7) {
        s->mdsp.gmc1(dest_y, ptr, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        int dxy;

        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        } else {
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY)
        return;

    motion_x = s->sprite_offset[1][0];
    motion_y = s->sprite_offset[1][1];
    src_x    = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y    = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    offset = (src_y * uvlinesize) + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
    }
    s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}

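/*
 * Full global motion compensation (two or more warp points): the source
 * coordinates for each block are given by the affine transform described by
 * sprite_offset and the sprite_delta matrix, evaluated here at the
 * macroblock's top-left corner; mdsp.gmc() then steps the transform across
 * the block. The values a + 1 and (1 << (2 * a + 1)) - s->no_rounding are
 * the shift and rounding parameters handed to the DSP routine, and luma is
 * rendered as two 8-pixel-wide halves with the transform advanced by 8
 * columns for the second half.
 */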
static void gmc_motion(MpegEncContext *s,
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a = s->sprite_warping_accuracy;
    int ox, oy;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
         s->sprite_delta[0][1] * s->mb_y * 16;
    oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
         s->sprite_delta[1][1] * s->mb_y * 16;

    s->mdsp.gmc(dest_y, ptr, linesize, 16,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);
    s->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
                ox + s->sprite_delta[0][0] * 8,
                oy + s->sprite_delta[1][0] * 8,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);

    if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY)
        return;

    ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
         s->sprite_delta[0][1] * s->mb_y * 8;
    oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
         s->sprite_delta[1][1] * s->mb_y * 8;

    ptr = ref_picture[1];
    s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos >> 1, s->v_edge_pos >> 1);

    ptr = ref_picture[2];
    s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos >> 1, s->v_edge_pos >> 1);
}

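/*
 * Half-pel motion compensation for one 8x8 luma block. dxy packs the
 * half-pel phase as bit 0 = horizontal, bit 1 = vertical; for example, away
 * from the right/bottom clipping edges, a half-pel vector of (3, -1) yields
 * dxy = 1 | (1 << 1) = 3, i.e. the diagonal (horizontal + vertical)
 * interpolation operator. Returns nonzero when the edge emulation buffer
 * had to be used.
 */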
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int src_x, int src_y,
                              op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy = 0;
    int emu = 0;

    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
    if (src_x != s->width)
        dxy |= motion_x & 1;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y != s->height)
        dxy |= (motion_y & 1) << 1;
    src += src_y * s->linesize + src_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 8, 0) ||
        (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 1) - 8, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest, src, s->linesize, 8);
    return emu;
}

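/*
 * Common half-pel motion compensation for a whole macroblock (or one field
 * of it when field_based is set). The luma half-pel phase goes into dxy;
 * the chroma vector and phase (uvdxy) are then derived per output format:
 * an H.263-specific path (including the FF_BUG_HPEL_CHROMA workaround),
 * full-pel chroma for H.261, and the generic 4:2:0 / 4:2:2 / 4:4:4 cases
 * driven by chroma_x_shift / chroma_y_shift. Out-of-frame references are
 * handled with emulated_edge_mc(), except for MPEG-1/2 where such vectors
 * are invalid and the block is skipped with a debug message.
 */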
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                          uint8_t *dest_y,
                          uint8_t *dest_cb,
                          uint8_t *dest_cr,
                          int field_based,
                          int bottom_field,
                          int field_select,
                          uint8_t **ref_picture,
                          op_pixels_func (*pix_op)[4],
                          int motion_x,
                          int motion_y,
                          int h,
                          int is_mpeg12,
                          int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t uvlinesize, linesize;

#if 0
    if (s->quarter_sample) {
        motion_x >>= 1;
        motion_y >>= 1;
    }
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = (mb_y << (4 - field_based)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
            mx      = (motion_x >> 1) | (motion_x & 1);
            my      = motion_y >> 1;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            uvdxy   = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x >> 1;
            uvsrc_y = src_y >> 1;
        }
    // Even chroma MVs are full-pel in H.261
    } else if (!is_mpeg12 && s->out_format == FMT_H261) {
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvdxy   = 0;
        uvsrc_x = s->mb_x * 8 + mx;
        uvsrc_y = mb_y * 8 + my;
    } else {
        if (s->chroma_y_shift) {
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            if (s->chroma_x_shift) {
                // Chroma 4:2:2
                mx      = motion_x / 2;
                uvdxy   = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x * 8 + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                // Chroma 4:4:4
                uvdxy   = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 16, 0) ||
        (unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 1) - h, 0)) {
        if (is_mpeg12 ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "MPEG motion vector out of boundary (%d %d)\n", src_x,
                   src_y);
            return;
        }
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.data */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
        s->out_format == FMT_H261) {
        ff_h261_loop_filter(s);
    }
}

/* apply one MPEG motion vector to the three components */
static void mpeg_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_select, uint8_t **ref_picture,
                        op_pixels_func (*pix_op)[4],
                        int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

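/*
 * Overlapped block motion compensation blend for one 8x8 block: every
 * output pixel is a weighted average of five 8x8 predictions (mid, top,
 * left, right, bottom). The weights in each OBMC_FILTER() invocation sum
 * to 8, so for example OBMC_FILTER(x, 2, 2, 4, 0, 0) computes
 * (2 * top[x] + 2 * left[x] + 4 * mid[x] + 4) >> 3.
 */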
// FIXME: SIMDify, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    int x;
    uint8_t *const top    = src[1];
    uint8_t *const left   = src[2];
    uint8_t *const mid    = src[0];
    uint8_t *const right  = src[3];
    uint8_t *const bottom = src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x] = (t * top[x] + l * left[x] + m * mid[x] + r * right[x] + b * bottom[x] + 4) >> 3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x,              t, l, m, r, b);\
    OBMC_FILTER(x + 1,          t, l, m, r, b);\
    OBMC_FILTER(x     + stride, t, l, m, r, b);\
    OBMC_FILTER(x + 1 + stride, t, l, m, r, b);

    x = 0;
    OBMC_FILTER (x    , 2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    OBMC_FILTER (x    , 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    OBMC_FILTER4(x    , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    OBMC_FILTER4(x    , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2 * stride;
    OBMC_FILTER (x    , 0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    OBMC_FILTER (x    , 0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}

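/*
 * Builds the five half-pel predictions needed by put_obmc(): the prediction
 * for the block's own (centre) vector plus the predictions for the top,
 * left, right and bottom neighbour vectors. Each distinct prediction is
 * rendered into s->obmc_scratchpad via hpel_motion(); neighbours whose
 * vector equals the centre vector simply reuse the centre prediction.
 */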
/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2] /* mid top left right bottom */)
#define MID 0
{
    int i;
    uint8_t *ptr[5];

    av_assert2(s->quarter_sample == 0);

    for (i = 0; i < 5; i++) {
        if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
            ptr[i] = ptr[MID];
        } else {
            ptr[i] = s->obmc_scratchpad + 8 * (i & 1) +
                     s->linesize * 8 * (i >> 1);
            hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}

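/*
 * Quarter-pel motion compensation for a 16-pixel-wide block (frame- or
 * field-based). The luma phase dxy carries two fractional bits per
 * dimension for qpix_op; the chroma vector is derived by halving the luma
 * vector (with the FF_BUG_QPEL_CHROMA / FF_BUG_QPEL_CHROMA2 bitstream
 * workarounds) and is then reduced to a half-pel phase for pix_op.
 */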
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t **ref_picture,
                               op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx >>= 1;
    my >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 16, 0) ||
        (unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 3) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        if (bottom_field) {
            dest_y  += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}

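/*
 * In 4MV mode the callers sum the four 8x8 luma vectors and pass the sums
 * in as (mx, my); ff_h263_round_chroma() turns each sum into a half-pel
 * chroma vector with the H.263-specific rounding, and the low bits then
 * select the half-pel phase dxy for the 8x8 chroma copy.
 */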
/**
 * H.263 chroma 4MV motion compensation.
 */
static void chroma_4mv_motion(MpegEncContext *s,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func *pix_op,
                              int mx, int my)
{
    uint8_t *ptr;
    int src_x, src_y, dxy, emu = 0;
    ptrdiff_t offset;

    /* In case of 8x8, we construct a single chroma motion vector
     * with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, (s->width >> 1));
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, (s->height >> 1));
    if (src_y == (s->height >> 1))
        dxy &= ~2;

    offset = src_y * s->uvlinesize + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x > FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 8, 0) ||
        (unsigned)src_y > FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 8, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}

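/*
 * Prefetch of reference pixels for upcoming macroblocks. The "+ 64" offset
 * biases the luma address four macroblocks (64 pixels) to the right of the
 * predicted position, and "(s->mb_x & 3) * 4" presumably staggers the
 * prefetched lines across consecutive macroblocks; chroma is prefetched at
 * half resolution for both planes at once by using pix[2] - pix[1] as the
 * stride.
 */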
static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
{
    /* fetch pixels for the estimated MV 4 macroblocks ahead;
     * optimized for 64-byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx    = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
    const int my    = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
    int off         = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;

    s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
    off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
    s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
}

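/*
 * OBMC for a whole macroblock: mv_cache is a 4x4 grid of 8x8-block motion
 * vectors with the current macroblock's four vectors in the centre
 * (mv_cache[1..2][1..2]) and the top, left and right neighbours' vectors
 * around them; the bottom row simply repeats the current macroblock's
 * bottom vectors, and intra or out-of-frame neighbours fall back to the
 * nearest inner vector. Each of the four luma blocks is then blended from
 * its own and its neighbours' predictions, and a single chroma vector is
 * formed from the sum of the four centre vectors.
 */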
static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4])
{
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    Picture *cur_frame   = &s->current_picture;
    int mb_x             = s->mb_x;
    int mb_y             = s->mb_y;
    const int xy         = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy     = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    av_assert2(!s->mb_skipped);

    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x = (i & 1) + 1;
        const int y = (i >> 1) + 1;
        int16_t mv[5][2] = {
            { mv_cache[y][x][0],     mv_cache[y][x][1]     },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}

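/*
 * 4MV mode: each of the four 8x8 luma blocks gets its own vector, handled
 * in quarter-pel precision through qpix_op when quarter_sample is set and
 * in half-pel precision through hpel_motion() otherwise. The chroma planes
 * use one vector derived from the sum of the four luma vectors (halved
 * first in the quarter-pel case) via chroma_4mv_motion().
 */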
static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_y,
                             uint8_t *dest_cb,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t **ref_picture,
                             qpel_mc_func (*qpix_op)[16],
                             op_pixels_func (*pix_op)[4])
{
    int dxy, mx, my, src_x, src_y;
    int i;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    uint8_t *ptr, *dest;

    mx = 0;
    my = 0;
    if (s->quarter_sample) {
        for (i = 0; i < 4; i++) {
            int motion_x = s->mv[dir][i][0];
            int motion_y = s->mv[dir][i][1];

            dxy   = ((motion_y & 3) << 2) | (motion_x & 3);
            src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

            /* WARNING: do not forget half pels */
            src_x = av_clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~3;
            src_y = av_clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~12;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 8, 0) ||
                (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 3) - 8, 0)) {
                s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                         s->linesize, s->linesize,
                                         9, 9,
                                         src_x, src_y,
                                         s->h_edge_pos,
                                         s->v_edge_pos);
                ptr = s->edge_emu_buffer;
            }
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            qpix_op[1][dxy](dest, ptr, s->linesize);

            mx += s->mv[dir][i][0] / 2;
            my += s->mv[dir][i][1] / 2;
        }
    } else {
        for (i = 0; i < 4; i++) {
            hpel_motion(s,
                        dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8,
                        mb_y * 16 + (i >> 1) * 8,
                        pix_op[1],
                        s->mv[dir][i][0],
                        s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
    }

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1], mx, my);
}

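/*
 * The dispatcher below selects the motion compensation routine from
 * s->mv_type: 16x16 (optionally GMC, quarter-pel or WMV2 mspel), 8x8 (4MV),
 * field prediction within frame or field pictures, 16x8, and dual prime
 * (MV_TYPE_DMV), where the second prediction of each pair is averaged in by
 * switching pix_op to hdsp.avg_pixels_tab after the first put.
 */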
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static av_always_inline void mpv_motion_internal(MpegEncContext *s,
                                                 uint8_t *dest_y,
                                                 uint8_t *dest_cb,
                                                 uint8_t *dest_cr,
                                                 int dir,
                                                 uint8_t **ref_picture,
                                                 op_pixels_func (*pix_op)[4],
                                                 qpel_mc_func (*qpix_op)[16],
                                                 int is_mpeg12)
{
    int i;
    int mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
        apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
        return;
    }

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        if (s->mcsel) {
            if (s->real_sprite_warping_points == 1) {
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            } else {
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        } else if (!is_mpeg12 && s->quarter_sample) {
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else {
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
        }
        break;
    case MV_TYPE_8X8:
        if (!is_mpeg12)
            apply_8x8(s, dest_y, dest_cb, dest_cr,
                      dir, ref_picture, qpix_op, pix_op);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if (!is_mpeg12 && s->quarter_sample) {
                for (i = 0; i < 2; i++)
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
            } else {
                /* top field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0],
                                  ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
                /* bottom field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1],
                                  ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
            }
        } else {
            if ((s->picture_structure != s->field_select[dir][0] + 1 &&
                 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) ||
                !ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if ((s->picture_structure == s->field_select[dir][i] + 1 ||
                 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
                ref_picture[0]) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16 * i,
                        8, mb_y >> 1);

            dest_y  += 16 * s->linesize;
            dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++)
                    mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                      j, j ^ i, ref_picture, pix_op,
                                      s->mv[dir][2 * i + j][0],
                                      s->mv[dir][2 * i + j][1], 8, mb_y);
                pix_op = s->hdsp.avg_pixels_tab;
            }
        } else {
            if (!ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            for (i = 0; i < 2; i++) {
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            s->picture_structure != i + 1,
                            ref_picture, pix_op,
                            s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                            16, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->hdsp.avg_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default: av_assert2(0);
    }
}

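/*
 * Public entry point. When not built with CONFIG_SMALL, a compile-time
 * specialized variant of mpv_motion_internal() (is_mpeg12 = 1) is used for
 * FMT_MPEG1 output so the MPEG-1/2 path avoids the H.263/MPEG-4 specific
 * branches; otherwise the generic variant handles everything.
 */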
void ff_mpv_motion(MpegEncContext *s,
                   uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int dir,
                   uint8_t **ref_picture,
                   op_pixels_func (*pix_op)[4],
                   qpel_mc_func (*qpix_op)[16])
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 1);
    else
#endif
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 0);
}