/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"

#include "libavutil/attributes.h"
#include "libavutil/common.h"

#include "avcodec.h"
#include "cavsdsp.h"
#include "idctdsp.h"
#include "mathops.h"
#include "qpeldsp.h"
/*****************************************************************************
 *
 * in-loop deblocking filter
 *
 ****************************************************************************/
#define P2 p0_p[-3*stride]
#define P1 p0_p[-2*stride]
#define P0 p0_p[-1*stride]
#define Q0 p0_p[ 0*stride]
#define Q1 p0_p[ 1*stride]
#define Q2 p0_p[ 2*stride]
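
/* The macros above name the six reconstructed samples across the edge being
 * filtered: P2..P0 lie before the edge and Q0..Q2 after it, addressed relative
 * to p0_p (the first sample past the edge) with pitch 'stride'. In the filters
 * below, alpha and beta act as edge/flatness thresholds and tc is the clipping
 * limit applied by the normal-strength filter. */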

static inline void loop_filter_l2(uint8_t *p0_p, int stride, int alpha, int beta)
{
    int p0 = P0;
    int q0 = Q0;

    if (abs(p0 - q0) < alpha && abs(P1 - p0) < beta && abs(Q1 - q0) < beta) {
        int s = p0 + q0 + 2;
        alpha = (alpha >> 2) + 2;
        if (abs(P2 - p0) < beta && abs(p0 - q0) < alpha) {
            P0 = (P1 + p0 + s) >> 2;
            P1 = (2*P1 + s) >> 2;
        } else
            P0 = (2*P1 + s) >> 2;
        if (abs(Q2 - q0) < beta && abs(q0 - p0) < alpha) {
            Q0 = (Q1 + q0 + s) >> 2;
            Q1 = (2*Q1 + s) >> 2;
        } else
            Q0 = (2*Q1 + s) >> 2;
    }
}

static inline void loop_filter_l1(uint8_t *p0_p, int stride, int alpha, int beta, int tc)
{
    int p0 = P0;
    int q0 = Q0;

    if (abs(p0 - q0) < alpha && abs(P1 - p0) < beta && abs(Q1 - q0) < beta) {
        int delta = av_clip(((q0 - p0)*3 + P1 - Q1 + 4) >> 3, -tc, tc);
        P0 = av_clip_uint8(p0 + delta);
        Q0 = av_clip_uint8(q0 - delta);
        if (abs(P2 - p0) < beta) {
            delta = av_clip(((P0 - P1)*3 + P2 - Q0 + 4) >> 3, -tc, tc);
            P1 = av_clip_uint8(P1 + delta);
        }
        if (abs(Q2 - q0) < beta) {
            delta = av_clip(((Q1 - Q0)*3 + P0 - Q2 + 4) >> 3, -tc, tc);
            Q1 = av_clip_uint8(Q1 - delta);
        }
    }
}

static inline void loop_filter_c2(uint8_t *p0_p, int stride, int alpha, int beta)
{
    int p0 = P0;
    int q0 = Q0;

    if (abs(p0 - q0) < alpha && abs(P1 - p0) < beta && abs(Q1 - q0) < beta) {
        int s = p0 + q0 + 2;
        alpha = (alpha >> 2) + 2;
        if (abs(P2 - p0) < beta && abs(p0 - q0) < alpha) {
            P0 = (P1 + p0 + s) >> 2;
        } else
            P0 = (2*P1 + s) >> 2;
        if (abs(Q2 - q0) < beta && abs(q0 - p0) < alpha) {
            Q0 = (Q1 + q0 + s) >> 2;
        } else
            Q0 = (2*Q1 + s) >> 2;
    }
}

static inline void loop_filter_c1(uint8_t *p0_p, int stride, int alpha, int beta,
                                  int tc)
{
    if (abs(P0 - Q0) < alpha && abs(P1 - P0) < beta && abs(Q1 - Q0) < beta) {
        int delta = av_clip(((Q0 - P0)*3 + P1 - Q1 + 4) >> 3, -tc, tc);
        P0 = av_clip_uint8(P0 + delta);
        Q0 = av_clip_uint8(Q0 - delta);
    }
}

#undef P0
#undef P1
#undef P2
#undef Q0
#undef Q1
#undef Q2
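
/* The cavs_filter_* wrappers below apply the scalar edge filters along a whole
 * macroblock edge: _lv/_lh handle a vertical/horizontal luma edge of 16
 * samples, _cv/_ch an 8-sample chroma edge. The edge is processed in two
 * halves controlled by the boundary strengths bs1 and bs2; a strength of 2
 * selects the strong filter (loop_filter_l2/_c2), any other nonzero strength
 * the normal filter with clipping limit tc. */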

static void cavs_filter_lv_c(uint8_t *d, int stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if (bs1 == 2)
        for (i = 0; i < 16; i++)
            loop_filter_l2(d + i*stride, 1, alpha, beta);
    else {
        if (bs1)
            for (i = 0; i < 8; i++)
                loop_filter_l1(d + i*stride, 1, alpha, beta, tc);
        if (bs2)
            for (i = 8; i < 16; i++)
                loop_filter_l1(d + i*stride, 1, alpha, beta, tc);
    }
}

static void cavs_filter_lh_c(uint8_t *d, int stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if (bs1 == 2)
        for (i = 0; i < 16; i++)
            loop_filter_l2(d + i, stride, alpha, beta);
    else {
        if (bs1)
            for (i = 0; i < 8; i++)
                loop_filter_l1(d + i, stride, alpha, beta, tc);
        if (bs2)
            for (i = 8; i < 16; i++)
                loop_filter_l1(d + i, stride, alpha, beta, tc);
    }
}

static void cavs_filter_cv_c(uint8_t *d, int stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if (bs1 == 2)
        for (i = 0; i < 8; i++)
            loop_filter_c2(d + i*stride, 1, alpha, beta);
    else {
        if (bs1)
            for (i = 0; i < 4; i++)
                loop_filter_c1(d + i*stride, 1, alpha, beta, tc);
        if (bs2)
            for (i = 4; i < 8; i++)
                loop_filter_c1(d + i*stride, 1, alpha, beta, tc);
    }
}

static void cavs_filter_ch_c(uint8_t *d, int stride, int alpha, int beta, int tc,
                             int bs1, int bs2)
{
    int i;
    if (bs1 == 2)
        for (i = 0; i < 8; i++)
            loop_filter_c2(d + i, stride, alpha, beta);
    else {
        if (bs1)
            for (i = 0; i < 4; i++)
                loop_filter_c1(d + i, stride, alpha, beta, tc);
        if (bs2)
            for (i = 4; i < 8; i++)
                loop_filter_c1(d + i, stride, alpha, beta, tc);
    }
}

/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/
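
/* cavs_idct8_add_c computes the 8x8 CAVS inverse transform in two passes
 * (rows first, then columns) directly on the int16_t coefficient block and
 * adds the result to the destination picture. The row pass keeps three extra
 * bits of precision (>> 3, with the rounding offset folded into a4/a5); the
 * column pass normalizes with >> 7 and clips to 8 bits via the ff_crop_tab
 * lookup. */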

static void cavs_idct8_add_c(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    int16_t (*src)[8] = (int16_t(*)[8])block;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;

    for( i = 0; i < 8; i++ ) {
        const int a0 = 3*src[i][1] - (src[i][7]<<1);
        const int a1 = 3*src[i][3] + (src[i][5]<<1);
        const int a2 = (src[i][3]<<1) - 3*src[i][5];
        const int a3 = (src[i][1]<<1) + 3*src[i][7];

        const int b4 = ((a0 + a1 + a3)<<1) + a1;
        const int b5 = ((a0 - a1 + a2)<<1) + a0;
        const int b6 = ((a3 - a2 - a1)<<1) + a3;
        const int b7 = ((a0 - a2 - a3)<<1) - a2;

        const int a7 = (src[i][2]<<2) - 10*src[i][6];
        const int a6 = (src[i][6]<<2) + 10*src[i][2];
        const int a5 = ((src[i][0] - src[i][4]) << 3) + 4;
        const int a4 = ((src[i][0] + src[i][4]) << 3) + 4;

        const int b0 = a4 + a6;
        const int b1 = a5 + a7;
        const int b2 = a5 - a7;
        const int b3 = a4 - a6;

        src[i][0] = (b0 + b4) >> 3;
        src[i][1] = (b1 + b5) >> 3;
        src[i][2] = (b2 + b6) >> 3;
        src[i][3] = (b3 + b7) >> 3;
        src[i][4] = (b3 - b7) >> 3;
        src[i][5] = (b2 - b6) >> 3;
        src[i][6] = (b1 - b5) >> 3;
        src[i][7] = (b0 - b4) >> 3;
    }
    for( i = 0; i < 8; i++ ) {
        const int a0 = 3*src[1][i] - (src[7][i]<<1);
        const int a1 = 3*src[3][i] + (src[5][i]<<1);
        const int a2 = (src[3][i]<<1) - 3*src[5][i];
        const int a3 = (src[1][i]<<1) + 3*src[7][i];

        const int b4 = ((a0 + a1 + a3)<<1) + a1;
        const int b5 = ((a0 - a1 + a2)<<1) + a0;
        const int b6 = ((a3 - a2 - a1)<<1) + a3;
        const int b7 = ((a0 - a2 - a3)<<1) - a2;

        const int a7 = (src[2][i]<<2) - 10*src[6][i];
        const int a6 = (src[6][i]<<2) + 10*src[2][i];
        const int a5 = (src[0][i] - src[4][i]) << 3;
        const int a4 = (src[0][i] + src[4][i]) << 3;

        const int b0 = a4 + a6;
        const int b1 = a5 + a7;
        const int b2 = a5 - a7;
        const int b3 = a4 - a6;

        dst[i + 0*stride] = cm[ dst[i + 0*stride] + ((b0 + b4) >> 7)];
        dst[i + 1*stride] = cm[ dst[i + 1*stride] + ((b1 + b5) >> 7)];
        dst[i + 2*stride] = cm[ dst[i + 2*stride] + ((b2 + b6) >> 7)];
        dst[i + 3*stride] = cm[ dst[i + 3*stride] + ((b3 + b7) >> 7)];
        dst[i + 4*stride] = cm[ dst[i + 4*stride] + ((b3 - b7) >> 7)];
        dst[i + 5*stride] = cm[ dst[i + 5*stride] + ((b2 - b6) >> 7)];
        dst[i + 6*stride] = cm[ dst[i + 6*stride] + ((b1 - b5) >> 7)];
        dst[i + 7*stride] = cm[ dst[i + 7*stride] + ((b0 - b4) >> 7)];
    }
}

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/
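
/* CAVS_SUBPIX and CAVS_SUBPIX_HV below generate the 8x8 and 16x16 sub-pixel
 * interpolation routines. Horizontal/vertical filtering uses 6-tap kernels:
 * the half-pel taps (0,-1,5,5,-1,0) and the left/right quarter-pel taps
 * (-1,-2,96,42,-7,0) and (0,-7,42,96,-2,-1). The _hv_ variants filter
 * horizontally into a temporary buffer and then vertically; the 'egpr'
 * variant (FULL=1) additionally averages in a neighbouring sample via src2
 * for the diagonal quarter-pel positions. */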

#define CAVS_SUBPIX(OPNAME, OP, NAME, A, B, C, D, E, F) \
static void OPNAME ## cavs_filt8_h_ ## NAME(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
    const int h=8;\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], A*src[-2] + B*src[-1] + C*src[0] + D*src[1] + E*src[2] + F*src[3]);\
        OP(dst[1], A*src[-1] + B*src[ 0] + C*src[1] + D*src[2] + E*src[3] + F*src[4]);\
        OP(dst[2], A*src[ 0] + B*src[ 1] + C*src[2] + D*src[3] + E*src[4] + F*src[5]);\
        OP(dst[3], A*src[ 1] + B*src[ 2] + C*src[3] + D*src[4] + E*src[5] + F*src[6]);\
        OP(dst[4], A*src[ 2] + B*src[ 3] + C*src[4] + D*src[5] + E*src[6] + F*src[7]);\
        OP(dst[5], A*src[ 3] + B*src[ 4] + C*src[5] + D*src[6] + E*src[7] + F*src[8]);\
        OP(dst[6], A*src[ 4] + B*src[ 5] + C*src[6] + D*src[7] + E*src[8] + F*src[9]);\
        OP(dst[7], A*src[ 5] + B*src[ 6] + C*src[7] + D*src[8] + E*src[9] + F*src[10]);\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## cavs_filt8_v_ ## NAME(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
    const int w=8;\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        const int src7= src[7 *srcStride];\
        const int src8= src[8 *srcStride];\
        const int src9= src[9 *srcStride];\
        const int src10= src[10 *srcStride];\
        OP(dst[0*dstStride], A*srcB + B*srcA + C*src0 + D*src1 + E*src2 + F*src3);\
        OP(dst[1*dstStride], A*srcA + B*src0 + C*src1 + D*src2 + E*src3 + F*src4);\
        OP(dst[2*dstStride], A*src0 + B*src1 + C*src2 + D*src3 + E*src4 + F*src5);\
        OP(dst[3*dstStride], A*src1 + B*src2 + C*src3 + D*src4 + E*src5 + F*src6);\
        OP(dst[4*dstStride], A*src2 + B*src3 + C*src4 + D*src5 + E*src6 + F*src7);\
        OP(dst[5*dstStride], A*src3 + B*src4 + C*src5 + D*src6 + E*src7 + F*src8);\
        OP(dst[6*dstStride], A*src4 + B*src5 + C*src6 + D*src7 + E*src8 + F*src9);\
        OP(dst[7*dstStride], A*src5 + B*src6 + C*src7 + D*src8 + E*src9 + F*src10);\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## cavs_filt16_v_ ## NAME(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_filt8_v_ ## NAME(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_filt8_v_ ## NAME(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_filt8_v_ ## NAME(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_filt8_v_ ## NAME(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## cavs_filt16_h_ ## NAME(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_filt8_h_ ## NAME(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_filt8_h_ ## NAME(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_filt8_h_ ## NAME(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_filt8_h_ ## NAME(dst+8, src+8, dstStride, srcStride);\
}\

#define CAVS_SUBPIX_HV(OPNAME, OP, NAME, AH, BH, CH, DH, EH, FH, AV, BV, CV, DV, EV, FV, FULL) \
static void OPNAME ## cavs_filt8_hv_ ## NAME(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dstStride, int srcStride){\
    int16_t temp[8*(8+5)];\
    int16_t *tmp = temp;\
    const int h=8;\
    const int w=8;\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    src1 -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= AH*src1[-2] + BH*src1[-1] + CH*src1[0] + DH*src1[1] + EH*src1[2] + FH*src1[3];\
        tmp[1]= AH*src1[-1] + BH*src1[ 0] + CH*src1[1] + DH*src1[2] + EH*src1[3] + FH*src1[4];\
        tmp[2]= AH*src1[ 0] + BH*src1[ 1] + CH*src1[2] + DH*src1[3] + EH*src1[4] + FH*src1[5];\
        tmp[3]= AH*src1[ 1] + BH*src1[ 2] + CH*src1[3] + DH*src1[4] + EH*src1[5] + FH*src1[6];\
        tmp[4]= AH*src1[ 2] + BH*src1[ 3] + CH*src1[4] + DH*src1[5] + EH*src1[6] + FH*src1[7];\
        tmp[5]= AH*src1[ 3] + BH*src1[ 4] + CH*src1[5] + DH*src1[6] + EH*src1[7] + FH*src1[8];\
        tmp[6]= AH*src1[ 4] + BH*src1[ 5] + CH*src1[6] + DH*src1[7] + EH*src1[8] + FH*src1[9];\
        tmp[7]= AH*src1[ 5] + BH*src1[ 6] + CH*src1[7] + DH*src1[8] + EH*src1[9] + FH*src1[10];\
        tmp+=8;\
        src1+=srcStride;\
    }\
    if(FULL) {\
        tmp = temp+8*2;\
        for(i=0; i<w; i++)\
        {\
            const int tmpB= tmp[-2*8]; \
            const int tmpA= tmp[-1*8]; \
            const int tmp0= tmp[0 *8]; \
            const int tmp1= tmp[1 *8]; \
            const int tmp2= tmp[2 *8]; \
            const int tmp3= tmp[3 *8]; \
            const int tmp4= tmp[4 *8]; \
            const int tmp5= tmp[5 *8]; \
            const int tmp6= tmp[6 *8]; \
            const int tmp7= tmp[7 *8]; \
            const int tmp8= tmp[8 *8]; \
            const int tmp9= tmp[9 *8]; \
            const int tmp10=tmp[10*8]; \
            OP(dst[0*dstStride], AV*tmpB + BV*tmpA + CV*tmp0 + DV*tmp1 + EV*tmp2 + FV*tmp3 + 64*src2[0*srcStride]); \
            OP(dst[1*dstStride], AV*tmpA + BV*tmp0 + CV*tmp1 + DV*tmp2 + EV*tmp3 + FV*tmp4 + 64*src2[1*srcStride]); \
            OP(dst[2*dstStride], AV*tmp0 + BV*tmp1 + CV*tmp2 + DV*tmp3 + EV*tmp4 + FV*tmp5 + 64*src2[2*srcStride]); \
            OP(dst[3*dstStride], AV*tmp1 + BV*tmp2 + CV*tmp3 + DV*tmp4 + EV*tmp5 + FV*tmp6 + 64*src2[3*srcStride]); \
            OP(dst[4*dstStride], AV*tmp2 + BV*tmp3 + CV*tmp4 + DV*tmp5 + EV*tmp6 + FV*tmp7 + 64*src2[4*srcStride]); \
            OP(dst[5*dstStride], AV*tmp3 + BV*tmp4 + CV*tmp5 + DV*tmp6 + EV*tmp7 + FV*tmp8 + 64*src2[5*srcStride]); \
            OP(dst[6*dstStride], AV*tmp4 + BV*tmp5 + CV*tmp6 + DV*tmp7 + EV*tmp8 + FV*tmp9 + 64*src2[6*srcStride]); \
            OP(dst[7*dstStride], AV*tmp5 + BV*tmp6 + CV*tmp7 + DV*tmp8 + EV*tmp9 + FV*tmp10 + 64*src2[7*srcStride]); \
            dst++;\
            tmp++;\
            src2++;\
        }\
    }\
    else {\
        tmp = temp+8*2;\
        for(i=0; i<w; i++)\
        {\
            const int tmpB= tmp[-2*8]; \
            const int tmpA= tmp[-1*8]; \
            const int tmp0= tmp[0 *8]; \
            const int tmp1= tmp[1 *8]; \
            const int tmp2= tmp[2 *8]; \
            const int tmp3= tmp[3 *8]; \
            const int tmp4= tmp[4 *8]; \
            const int tmp5= tmp[5 *8]; \
            const int tmp6= tmp[6 *8]; \
            const int tmp7= tmp[7 *8]; \
            const int tmp8= tmp[8 *8]; \
            const int tmp9= tmp[9 *8]; \
            const int tmp10=tmp[10*8]; \
            OP(dst[0*dstStride], AV*tmpB + BV*tmpA + CV*tmp0 + DV*tmp1 + EV*tmp2 + FV*tmp3); \
            OP(dst[1*dstStride], AV*tmpA + BV*tmp0 + CV*tmp1 + DV*tmp2 + EV*tmp3 + FV*tmp4); \
            OP(dst[2*dstStride], AV*tmp0 + BV*tmp1 + CV*tmp2 + DV*tmp3 + EV*tmp4 + FV*tmp5); \
            OP(dst[3*dstStride], AV*tmp1 + BV*tmp2 + CV*tmp3 + DV*tmp4 + EV*tmp5 + FV*tmp6); \
            OP(dst[4*dstStride], AV*tmp2 + BV*tmp3 + CV*tmp4 + DV*tmp5 + EV*tmp6 + FV*tmp7); \
            OP(dst[5*dstStride], AV*tmp3 + BV*tmp4 + CV*tmp5 + DV*tmp6 + EV*tmp7 + FV*tmp8); \
            OP(dst[6*dstStride], AV*tmp4 + BV*tmp5 + CV*tmp6 + DV*tmp7 + EV*tmp8 + FV*tmp9); \
            OP(dst[7*dstStride], AV*tmp5 + BV*tmp6 + CV*tmp7 + DV*tmp8 + EV*tmp9 + FV*tmp10); \
            dst++;\
            tmp++;\
        }\
    }\
}\
\
static void OPNAME ## cavs_filt16_hv_ ## NAME(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dstStride, int srcStride){ \
    OPNAME ## cavs_filt8_hv_ ## NAME(dst  , src1  , src2  , dstStride, srcStride); \
    OPNAME ## cavs_filt8_hv_ ## NAME(dst+8, src1+8, src2+8, dstStride, srcStride); \
    src1 += 8*srcStride;\
    src2 += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_filt8_hv_ ## NAME(dst  , src1  , src2  , dstStride, srcStride); \
    OPNAME ## cavs_filt8_hv_ ## NAME(dst+8, src1+8, src2+8, dstStride, srcStride); \
}\

#define CAVS_MC(OPNAME, SIZE) \
static void OPNAME ## cavs_qpel ## SIZE ## _mc10_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _h_qpel_l(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc20_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _h_hpel(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc30_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _h_qpel_r(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc01_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _v_qpel_l(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc02_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _v_hpel(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc03_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _v_qpel_r(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc22_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_jj(dst, src, NULL, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc11_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc13_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+stride, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc31_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+1, stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+stride+1,stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc21_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_ff(dst, src, src+stride+1,stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc12_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_ii(dst, src, src+stride+1,stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc32_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_kk(dst, src, src+stride+1,stride, stride); \
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc23_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_filt ## SIZE ## _hv_qq(dst, src, src+stride+1,stride, stride); \
}\

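/* In the mcXY naming used above, X is the horizontal and Y the vertical
 * quarter-pel offset of the prediction: mc20/mc02/mc22 are the half-pel
 * positions, mc10/mc30 and mc01/mc03 the purely horizontal/vertical
 * quarter-pel positions, and mc11/mc13/mc31/mc33 the corner positions, which
 * average the central half-pel hv result with the full-pel sample selected by
 * the src2 offset passed to _hv_egpr. */
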
#define op_put1(a, b)  a = cm[((b)+4)>>3]
#define op_put2(a, b)  a = cm[((b)+64)>>7]
#define op_put3(a, b)  a = cm[((b)+32)>>6]
#define op_put4(a, b)  a = cm[((b)+512)>>10]
#define op_avg1(a, b)  a = ((a)+cm[((b)+4)>>3]  +1)>>1
#define op_avg2(a, b)  a = ((a)+cm[((b)+64)>>7] +1)>>1
#define op_avg3(a, b)  a = ((a)+cm[((b)+32)>>6] +1)>>1
#define op_avg4(a, b)  a = ((a)+cm[((b)+512)>>10]+1)>>1
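
/* The op_* macros perform the final rounding, shift and clip for the
 * different filter gains: the half-pel kernel sums to 8 (op_*1, >>3), the
 * quarter-pel kernels to 128 (op_*2, >>7, also used for the egpr path whose
 * total gain is 64 + 64 = 128), the separable half-pel hv path to 8*8 = 64
 * (op_*3, >>6) and the mixed half/quarter-pel hv paths to 8*128 = 1024
 * (op_*4, >>10); the avg variants then average with the existing prediction. */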

CAVS_SUBPIX(put_   , op_put1, hpel,    0, -1,  5,  5, -1,  0)
CAVS_SUBPIX(put_   , op_put2, qpel_l, -1, -2, 96, 42, -7,  0)
CAVS_SUBPIX(put_   , op_put2, qpel_r,  0, -7, 42, 96, -2, -1)
CAVS_SUBPIX_HV(put_, op_put3, jj,      0, -1,  5,  5, -1,  0,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(put_, op_put4, ff,      0, -1,  5,  5, -1,  0, -1, -2, 96, 42, -7,  0, 0)
CAVS_SUBPIX_HV(put_, op_put4, ii,     -1, -2, 96, 42, -7,  0,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(put_, op_put4, kk,      0, -7, 42, 96, -2, -1,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(put_, op_put4, qq,      0, -1,  5,  5, -1,  0,  0, -7, 42, 96, -2, -1, 0)
CAVS_SUBPIX_HV(put_, op_put2, egpr,    0, -1,  5,  5, -1,  0,  0, -1,  5,  5, -1,  0, 1)
CAVS_SUBPIX(avg_   , op_avg1, hpel,    0, -1,  5,  5, -1,  0)
CAVS_SUBPIX(avg_   , op_avg2, qpel_l, -1, -2, 96, 42, -7,  0)
CAVS_SUBPIX(avg_   , op_avg2, qpel_r,  0, -7, 42, 96, -2, -1)
CAVS_SUBPIX_HV(avg_, op_avg3, jj,      0, -1,  5,  5, -1,  0,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, ff,      0, -1,  5,  5, -1,  0, -1, -2, 96, 42, -7,  0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, ii,     -1, -2, 96, 42, -7,  0,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, kk,      0, -7, 42, 96, -2, -1,  0, -1,  5,  5, -1,  0, 0)
CAVS_SUBPIX_HV(avg_, op_avg4, qq,      0, -1,  5,  5, -1,  0,  0, -7, 42, 96, -2, -1, 0)
CAVS_SUBPIX_HV(avg_, op_avg2, egpr,    0, -1,  5,  5, -1,  0,  0, -1,  5,  5, -1,  0, 1)

CAVS_MC(put_, 8)
CAVS_MC(put_, 16)
CAVS_MC(avg_, 8)
CAVS_MC(avg_, 16)

#define put_cavs_qpel8_mc00_c  ff_put_pixels8x8_c
#define avg_cavs_qpel8_mc00_c  ff_avg_pixels8x8_c
#define put_cavs_qpel16_mc00_c ff_put_pixels16x16_c
#define avg_cavs_qpel16_mc00_c ff_avg_pixels16x16_c
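
/* ff_cavsdsp_init fills the function pointer tables: pixels_tab[0][] holds
 * the 16x16 functions and pixels_tab[1][] the 8x8 ones, indexed by the
 * quarter-pel position as X + 4*Y (mc00 ... mc33). The C versions installed
 * here may later be overridden by the x86 implementations. */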
av_cold void ff_cavsdsp_init(CAVSDSPContext* c, AVCodecContext *avctx) {
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c

    dspfunc(put_cavs_qpel, 0, 16);
    dspfunc(put_cavs_qpel, 1, 8);
    dspfunc(avg_cavs_qpel, 0, 16);
    dspfunc(avg_cavs_qpel, 1, 8);
    c->cavs_filter_lv = cavs_filter_lv_c;
    c->cavs_filter_lh = cavs_filter_lh_c;
    c->cavs_filter_cv = cavs_filter_cv_c;
    c->cavs_filter_ch = cavs_filter_ch_c;
    c->cavs_idct8_add = cavs_idct8_add_c;
    c->idct_perm      = FF_IDCT_PERM_NONE;

    if (ARCH_X86)
        ff_cavsdsp_init_x86(c, avctx);
}