| 1 | /* |
| 2 | * Copyright (c) 2011 Michael Niedermayer |
| 3 | * |
| 4 | * This file is part of FFmpeg. |
| 5 | * |
| 6 | * FFmpeg is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by |
| 8 | * the Free Software Foundation; either version 2 of the License, or |
| 9 | * (at your option) any later version. |
| 10 | * |
| 11 | * FFmpeg is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License |
| 17 | * along with FFmpeg; if not, write to the Free Software |
| 18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| 19 | * |
| 20 | * Parts of this file have been stolen from mplayer |
| 21 | */ |
| 22 | |
| 23 | /** |
| 24 | * @file |
| 25 | */ |
| 26 | |
| 27 | #include "avfilter.h" |
| 28 | #include "video.h" |
| 29 | #include "formats.h" |
| 30 | #include "internal.h" |
| 31 | #include "libavutil/avassert.h" |
| 32 | #include "libavutil/pixdesc.h" |
| 33 | #include "libavutil/intreadwrite.h" |
| 34 | #include "libavutil/imgutils.h" |
| 35 | #include "libavutil/opt.h" |
| 36 | |
| 37 | #include "libmpcodecs/vf.h" |
| 38 | #include "libmpcodecs/img_format.h" |
| 39 | #include "libmpcodecs/cpudetect.h" |
| 40 | #include "libmpcodecs/av_helpers.h" |
| 41 | #include "libmpcodecs/libvo/fastmemcpy.h" |
| 42 | |
| 43 | #include "libswscale/swscale.h" |
| 44 | |
| 45 | |
//FIXME maybe link the orig in
//XXX: entries that share the same pix_fmt must be adjacent to each other
/* Translation table between libmpcodecs IMGFMT_* codes and AVPixelFormat,
 * terminated by a {0, AV_PIX_FMT_NONE} sentinel.
 * Note: for the packed RGB formats <= 16 bpp the MPlayer names are reversed
 * relative to FFmpeg's (e.g. IMGFMT_BGR16* maps to AV_PIX_FMT_RGB565*),
 * which is why several pairings below look swapped. */
static const struct {
    int fmt;
    enum AVPixelFormat pix_fmt;
} conversion_map[] = {
    {IMGFMT_ARGB, AV_PIX_FMT_ARGB},
    {IMGFMT_BGRA, AV_PIX_FMT_BGRA},
    {IMGFMT_BGR24, AV_PIX_FMT_BGR24},
    {IMGFMT_BGR16BE, AV_PIX_FMT_RGB565BE},
    {IMGFMT_BGR16LE, AV_PIX_FMT_RGB565LE},
    {IMGFMT_BGR15BE, AV_PIX_FMT_RGB555BE},
    {IMGFMT_BGR15LE, AV_PIX_FMT_RGB555LE},
    {IMGFMT_BGR12BE, AV_PIX_FMT_RGB444BE},
    {IMGFMT_BGR12LE, AV_PIX_FMT_RGB444LE},
    {IMGFMT_BGR8, AV_PIX_FMT_RGB8},
    {IMGFMT_BGR4, AV_PIX_FMT_RGB4},
    {IMGFMT_BGR1, AV_PIX_FMT_MONOBLACK},
    {IMGFMT_RGB1, AV_PIX_FMT_MONOBLACK},
    {IMGFMT_RG4B, AV_PIX_FMT_BGR4_BYTE},
    {IMGFMT_BG4B, AV_PIX_FMT_RGB4_BYTE},
    {IMGFMT_RGB48LE, AV_PIX_FMT_RGB48LE},
    {IMGFMT_RGB48BE, AV_PIX_FMT_RGB48BE},
    {IMGFMT_ABGR, AV_PIX_FMT_ABGR},
    {IMGFMT_RGBA, AV_PIX_FMT_RGBA},
    {IMGFMT_RGB24, AV_PIX_FMT_RGB24},
    {IMGFMT_RGB16BE, AV_PIX_FMT_BGR565BE},
    {IMGFMT_RGB16LE, AV_PIX_FMT_BGR565LE},
    {IMGFMT_RGB15BE, AV_PIX_FMT_BGR555BE},
    {IMGFMT_RGB15LE, AV_PIX_FMT_BGR555LE},
    {IMGFMT_RGB12BE, AV_PIX_FMT_BGR444BE},
    {IMGFMT_RGB12LE, AV_PIX_FMT_BGR444LE},
    {IMGFMT_RGB8, AV_PIX_FMT_BGR8},
    {IMGFMT_RGB4, AV_PIX_FMT_BGR4},
    {IMGFMT_BGR8, AV_PIX_FMT_PAL8},
    {IMGFMT_YUY2, AV_PIX_FMT_YUYV422},
    {IMGFMT_UYVY, AV_PIX_FMT_UYVY422},
    {IMGFMT_NV12, AV_PIX_FMT_NV12},
    {IMGFMT_NV21, AV_PIX_FMT_NV21},
    {IMGFMT_Y800, AV_PIX_FMT_GRAY8},
    {IMGFMT_Y8, AV_PIX_FMT_GRAY8},
    {IMGFMT_YVU9, AV_PIX_FMT_YUV410P},
    {IMGFMT_IF09, AV_PIX_FMT_YUV410P},
    {IMGFMT_YV12, AV_PIX_FMT_YUV420P},
    {IMGFMT_I420, AV_PIX_FMT_YUV420P},
    {IMGFMT_IYUV, AV_PIX_FMT_YUV420P},
    {IMGFMT_411P, AV_PIX_FMT_YUV411P},
    {IMGFMT_422P, AV_PIX_FMT_YUV422P},
    {IMGFMT_444P, AV_PIX_FMT_YUV444P},
    {IMGFMT_440P, AV_PIX_FMT_YUV440P},

    {IMGFMT_420A, AV_PIX_FMT_YUVA420P},

    {IMGFMT_420P16_LE, AV_PIX_FMT_YUV420P16LE},
    {IMGFMT_420P16_BE, AV_PIX_FMT_YUV420P16BE},
    {IMGFMT_422P16_LE, AV_PIX_FMT_YUV422P16LE},
    {IMGFMT_422P16_BE, AV_PIX_FMT_YUV422P16BE},
    {IMGFMT_444P16_LE, AV_PIX_FMT_YUV444P16LE},
    {IMGFMT_444P16_BE, AV_PIX_FMT_YUV444P16BE},

    // YUVJ are YUV formats that use the full Y range and not just
    // 16 - 235 (see colorspaces.txt).
    // Currently they are all treated the same way.
    {IMGFMT_YV12, AV_PIX_FMT_YUVJ420P},
    {IMGFMT_422P, AV_PIX_FMT_YUVJ422P},
    {IMGFMT_444P, AV_PIX_FMT_YUVJ444P},
    {IMGFMT_440P, AV_PIX_FMT_YUVJ440P},

#if FF_API_XVMC
    {IMGFMT_XVMC_MOCO_MPEG2, AV_PIX_FMT_XVMC_MPEG2_MC},
    {IMGFMT_XVMC_IDCT_MPEG2, AV_PIX_FMT_XVMC_MPEG2_IDCT},
#endif /* FF_API_XVMC */

    {IMGFMT_VDPAU_MPEG1, AV_PIX_FMT_VDPAU_MPEG1},
    {IMGFMT_VDPAU_MPEG2, AV_PIX_FMT_VDPAU_MPEG2},
    {IMGFMT_VDPAU_H264, AV_PIX_FMT_VDPAU_H264},
    {IMGFMT_VDPAU_WMV3, AV_PIX_FMT_VDPAU_WMV3},
    {IMGFMT_VDPAU_VC1, AV_PIX_FMT_VDPAU_VC1},
    {IMGFMT_VDPAU_MPEG4, AV_PIX_FMT_VDPAU_MPEG4},
    {0, AV_PIX_FMT_NONE}
};
| 127 | |
| 128 | extern const vf_info_t ff_vf_info_eq2; |
| 129 | extern const vf_info_t ff_vf_info_eq; |
| 130 | extern const vf_info_t ff_vf_info_fspp; |
| 131 | extern const vf_info_t ff_vf_info_ilpack; |
| 132 | extern const vf_info_t ff_vf_info_pp7; |
| 133 | extern const vf_info_t ff_vf_info_softpulldown; |
| 134 | extern const vf_info_t ff_vf_info_uspp; |
| 135 | |
| 136 | |
/* NULL-terminated table of wrapped MPlayer filters selectable via the
 * "filter" option; looked up by name in init(). */
static const vf_info_t* const filters[]={
    &ff_vf_info_eq2,
    &ff_vf_info_eq,
    &ff_vf_info_fspp,
    &ff_vf_info_ilpack,
    &ff_vf_info_pp7,
    &ff_vf_info_softpulldown,
    &ff_vf_info_uspp,

    NULL
};
| 148 | |
| 149 | /* |
| 150 | Unsupported filters |
| 151 | 1bpp |
| 152 | ass |
| 153 | bmovl |
| 154 | crop |
| 155 | dvbscale |
| 156 | flip |
| 157 | expand |
| 158 | format |
| 159 | halfpack |
| 160 | lavc |
| 161 | lavcdeint |
| 162 | noformat |
| 163 | pp |
| 164 | scale |
| 165 | tfields |
| 166 | vo |
| 167 | yadif |
| 168 | zrmjpeg |
| 169 | */ |
| 170 | |
| 171 | CpuCaps ff_gCpuCaps; //FIXME initialize this so optims work |
| 172 | |
| 173 | enum AVPixelFormat ff_mp2ff_pix_fmt(int mp){ |
| 174 | int i; |
| 175 | for(i=0; conversion_map[i].fmt && mp != conversion_map[i].fmt; i++) |
| 176 | ; |
| 177 | return mp == conversion_map[i].fmt ? conversion_map[i].pix_fmt : AV_PIX_FMT_NONE; |
| 178 | } |
| 179 | |
/** Instance context of the "mp" wrapper filter. */
typedef struct {
    const AVClass *class;          ///< required first member for AVOptions
    vf_instance_t vf;              ///< the wrapped libmpcodecs filter instance
    vf_instance_t next_vf;         ///< dummy downstream instance; collects the output geometry
    AVFilterContext *avfctx;       ///< back pointer to the owning filter context
    int frame_returned;            ///< bumped each time a frame is pushed downstream
    char *filter;                  ///< user option: "name[=args]" of the MPlayer filter
    enum AVPixelFormat in_pix_fmt; ///< pixel format of the most recent input frame
} MPContext;
| 189 | |
#define OFFSET(x) offsetof(MPContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* Single option: the wrapped filter's name plus its parameter string,
 * parsed in init(). */
static const AVOption mp_options[] = {
    { "filter", "set MPlayer filter name and parameters", OFFSET(filter), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
    { NULL }
};
| 196 | |
| 197 | AVFILTER_DEFINE_CLASS(mp); |
| 198 | |
| 199 | void ff_mp_msg(int mod, int lev, const char *format, ... ){ |
| 200 | va_list va; |
| 201 | va_start(va, format); |
| 202 | //FIXME convert lev/mod |
| 203 | av_vlog(NULL, AV_LOG_DEBUG, format, va); |
| 204 | va_end(va); |
| 205 | } |
| 206 | |
/**
 * libmpcodecs shim for mp_msg_test(): report whether a message at the given
 * module/level would be printed. Always claims "yes" with an arbitrary
 * non-zero value, since ff_mp_msg() forwards everything to av_vlog().
 */
int ff_mp_msg_test(int mod, int lev){
    return 123; // any non-zero value; callers only test for truthiness
}
| 210 | |
/**
 * libmpcodecs shim for init_avcodec(): intentionally a no-op.
 * Registering/initializing libavcodec from inside a filter would be
 * both unnecessary and impolite toward the calling application.
 */
void ff_init_avcodec(void)
{
}
| 215 | |
| 216 | //Exact copy of vf.c |
| 217 | void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src){ |
| 218 | dst->pict_type= src->pict_type; |
| 219 | dst->fields = src->fields; |
| 220 | dst->qscale_type= src->qscale_type; |
| 221 | if(dst->width == src->width && dst->height == src->height){ |
| 222 | dst->qstride= src->qstride; |
| 223 | dst->qscale= src->qscale; |
| 224 | } |
| 225 | } |
| 226 | |
| 227 | //Exact copy of vf.c |
/**
 * Default draw_slice: forward the slice to the next filter, or copy it into
 * the stored destination image (vf->dmpi) when the next filter has no
 * draw_slice callback. Exact copy of vf.c.
 *
 * @param src    per-plane source pointers for the slice
 * @param stride per-plane source strides
 * @param w,h    slice size in luma pixels
 * @param x,y    slice position in the destination image (luma coordinates)
 */
void ff_vf_next_draw_slice(struct vf_instance *vf,unsigned char** src, int * stride,int w, int h, int x, int y){
    if (vf->next->draw_slice) {
        vf->next->draw_slice(vf->next,src,stride,w,h,x,y);
        return;
    }
    if (!vf->dmpi) {
        ff_mp_msg(MSGT_VFILTER,MSGL_ERR,"draw_slice: dmpi not stored by vf_%s\n", vf->info->name);
        return;
    }
    // packed destination: a single plane, bpp/8 bytes per pixel
    if (!(vf->dmpi->flags & MP_IMGFLAG_PLANAR)) {
        memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+vf->dmpi->bpp/8*x,
            src[0], vf->dmpi->bpp/8*w, h, vf->dmpi->stride[0], stride[0]);
        return;
    }
    // planar destination: copy luma, then both chroma planes scaled by the
    // image's chroma shifts
    memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+x, src[0],
        w, h, vf->dmpi->stride[0], stride[0]);
    memcpy_pic(vf->dmpi->planes[1]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[1]+(x>>vf->dmpi->chroma_x_shift),
        src[1], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[1], stride[1]);
    memcpy_pic(vf->dmpi->planes[2]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[2]+(x>>vf->dmpi->chroma_x_shift),
        src[2], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[2], stride[2]);
}
| 249 | |
| 250 | //Exact copy of vf.c |
/**
 * Blank a rectangle of an mp_image_t. Exact copy of vf.c.
 * Planar YUV: luma is set to 0, chroma planes to 128 (neutral grey).
 * Packed YUV: pixels are set to a black pattern (word order depends on
 * endianness and the SWAPPED flag). Other packed formats: zeroed.
 *
 * @param x0,y0 top-left corner of the rectangle (luma coordinates)
 * @param w,h   rectangle size in luma pixels
 */
void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h){
    int y;
    if(mpi->flags&MP_IMGFLAG_PLANAR){
        y0&=~1;h+=h&1; // round the rect to even rows so chroma rows stay aligned
        if(x0==0 && w==mpi->width){
            // full width clear:
            memset(mpi->planes[0]+mpi->stride[0]*y0,0,mpi->stride[0]*h);
            memset(mpi->planes[1]+mpi->stride[1]*(y0>>mpi->chroma_y_shift),128,mpi->stride[1]*(h>>mpi->chroma_y_shift));
            memset(mpi->planes[2]+mpi->stride[2]*(y0>>mpi->chroma_y_shift),128,mpi->stride[2]*(h>>mpi->chroma_y_shift));
        } else
            // partial-width clear, two luma rows per iteration
            for(y=y0;y<y0+h;y+=2){
                memset(mpi->planes[0]+x0+mpi->stride[0]*y,0,w);
                memset(mpi->planes[0]+x0+mpi->stride[0]*(y+1),0,w);
                memset(mpi->planes[1]+(x0>>mpi->chroma_x_shift)+mpi->stride[1]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
                memset(mpi->planes[2]+(x0>>mpi->chroma_x_shift)+mpi->stride[2]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
            }
        return;
    }
    // packed:
    for(y=y0;y<y0+h;y++){
        unsigned char* dst=mpi->planes[0]+mpi->stride[0]*y+(mpi->bpp>>3)*x0;
        if(mpi->flags&MP_IMGFLAG_YUV){
            unsigned int* p=(unsigned int*) dst;
            int size=(mpi->bpp>>3)*w/4; // row length in 32-bit words
            int i;
            // black for packed YUV is Y=0, U=V=128; the byte layout of that
            // pattern depends on host endianness and on the SWAPPED flag
#if HAVE_BIGENDIAN
#define CLEAR_PACKEDYUV_PATTERN 0x00800080
#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x80008000
#else
#define CLEAR_PACKEDYUV_PATTERN 0x80008000
#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x00800080
#endif
            if(mpi->flags&MP_IMGFLAG_SWAPPED){
                // 4-way unrolled fill, then the <4-word tail
                for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
                for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
            } else {
                for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN;
                for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN;
            }
        } else
            memset(dst,0,(mpi->bpp>>3)*w);
    }
}
| 294 | |
/* Format negotiation is handled by libavfilter's query_formats(), so the
 * dummy next instance claims to accept every format. */
int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt){
    return 1;
}
| 298 | |
| 299 | //used by delogo |
//used by delogo
/* Colorspace negotiation shim: simply report the caller's preferred format
 * as supported; the real negotiation happens in query_formats(). */
unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred){
    return preferred;
}
| 303 | |
/**
 * Allocate or reuse an mp_image_t for the wrapped filter's output.
 * Near-copy of vf_get_image() from MPlayer's vf.c; here it is always called
 * on the dummy next_vf instance embedded in MPContext (asserted below).
 *
 * @param outfmt     IMGFMT_* of the requested image
 * @param mp_imgtype MP_IMGTYPE_* in the low byte; for NUMBERED images the
 *                   requested slot index is carried in bits 16+
 * @param mp_imgflag MP_IMGFLAG_* requirements (alignment, readability, ...)
 * @param w,h        requested size, or -1 to use the configured vf->w/vf->h
 * @return the (possibly newly allocated) image, or NULL on failure
 */
mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){
    // recover the wrapper context: vf is the next_vf member of MPContext
    MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf));
    mp_image_t* mpi=NULL;
    int w2;
    int number = mp_imgtype >> 16;

    av_assert0(vf->next == NULL); // all existing filters call this just on next

    //vf_dint needs these as it calls ff_vf_get_image() before configuring the output
    if(vf->w==0 && w>0) vf->w=w;
    if(vf->h==0 && h>0) vf->h=h;

    av_assert0(w == -1 || w >= vf->w);
    av_assert0(h == -1 || h >= vf->h);
    av_assert0(vf->w > 0);
    av_assert0(vf->h > 0);

    av_log(m->avfctx, AV_LOG_DEBUG, "get_image: %d:%d, vf: %d:%d\n", w,h,vf->w,vf->h);

    if (w == -1) w = vf->w;
    if (h == -1) h = vf->h;

    // round the width up to a multiple of 16 if aligned strides are acceptable
    w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w;

    // Note: we should call libvo first to check if it supports direct rendering
    // and if not, then fallback to software buffers:
    switch(mp_imgtype & 0xff){
    case MP_IMGTYPE_EXPORT:
        if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.export_images[0];
        break;
    case MP_IMGTYPE_STATIC:
        if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.static_images[0];
        break;
    case MP_IMGTYPE_TEMP:
        if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.temp_images[0];
        break;
    case MP_IMGTYPE_IPB:
        if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame:
            if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
            mpi=vf->imgctx.temp_images[0];
            break;
        }
        /* fallthrough: readable IPB frames use the double-buffered IP slots */
    case MP_IMGTYPE_IP:
        if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.static_images[vf->imgctx.static_idx];
        vf->imgctx.static_idx^=1; // flip between the two IP buffers
        break;
    case MP_IMGTYPE_NUMBERED:
        if (number == -1) {
            // pick the first free (or never-used) numbered slot
            int i;
            for (i = 0; i < NUM_NUMBERED_MPI; i++)
                if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count)
                    break;
            number = i;
        }
        if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL;
        if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = ff_new_mp_image(w2,h);
        mpi = vf->imgctx.numbered_images[number];
        mpi->number = number;
        break;
    }
    if(mpi){
        mpi->type=mp_imgtype;
        mpi->w=vf->w; mpi->h=vf->h;
        // keep buffer allocation status & color flags only:
    //    mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT);
        mpi->flags&=MP_IMGFLAG_ALLOCATED|MP_IMGFLAG_TYPE_DISPLAYED|MP_IMGFLAGMASK_COLORS;
        // accept restrictions, draw_slice and palette flags only:
        mpi->flags|=mp_imgflag&(MP_IMGFLAGMASK_RESTRICTIONS|MP_IMGFLAG_DRAW_CALLBACK|MP_IMGFLAG_RGB_PALETTE);
        if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK;
        if(mpi->width!=w2 || mpi->height!=h){
//      printf("vf.c: MPI parameters changed!  %dx%d -> %dx%d   \n", mpi->width,mpi->height,w2,h);
            if(mpi->flags&MP_IMGFLAG_ALLOCATED){
                if(mpi->width<w2 || mpi->height<h){
                    // need to re-allocate buffer memory:
                    av_free(mpi->planes[0]);
                    mpi->flags&=~MP_IMGFLAG_ALLOCATED;
                    ff_mp_msg(MSGT_VFILTER,MSGL_V,"vf.c: have to REALLOCATE buffer memory :(\n");
                }
//          } else {
            } {
                // unconditionally record the new geometry (the commented-out
                // else above shows the original intent in MPlayer's vf.c)
                mpi->width=w2; mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
                mpi->height=h; mpi->chroma_height=(h + (1<<mpi->chroma_y_shift) - 1)>>mpi->chroma_y_shift;
            }
        }
        if(!mpi->bpp) ff_mp_image_setfmt(mpi,outfmt);
        if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){

            av_assert0(!vf->get_image);
            // check libvo first!
            // NOTE: dead branch under the assert above; kept from vf.c
            if(vf->get_image) vf->get_image(vf,mpi);

            if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
                // non-direct and not yet allocated image. allocate it!
                if (!mpi->bpp) { // no way we can allocate this
                    ff_mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
                           "ff_vf_get_image: Tried to allocate a format that can not be allocated!\n");
                    return NULL;
                }

                // check if codec prefer aligned stride:
                if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){
                    int align=(mpi->flags&MP_IMGFLAG_PLANAR &&
                               mpi->flags&MP_IMGFLAG_YUV) ?
                               (8<<mpi->chroma_x_shift)-1 : 15; // -- maybe FIXME
                    w2=((w+align)&(~align));
                    if(mpi->width!=w2){
#if 0
                        // we have to change width... check if we CAN co it:
                        int flags=vf->query_format(vf,outfmt); // should not fail
                        if(!(flags&3)) ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"??? ff_vf_get_image{vf->query_format(outfmt)} failed!\n");
//          printf("query -> 0x%X    \n",flags);
                        if(flags&VFCAP_ACCEPT_STRIDE){
#endif
                            mpi->width=w2;
                            mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
//                        }
                    }
                }

                ff_mp_image_alloc_planes(mpi);
//      printf("clearing img!\n");
                ff_vf_mpi_clear(mpi,0,0,mpi->width,mpi->height);
            }
        }
        av_assert0(!vf->start_slice);
        // NOTE: dead branch under the assert above; kept from vf.c
        if(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)
            if(vf->start_slice) vf->start_slice(vf,mpi);
        if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){
            // one-time debug dump of the image parameters
            ff_mp_msg(MSGT_DECVIDEO,MSGL_V,"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\n",
                  "NULL"/*vf->info->name*/,
                  (mpi->type==MP_IMGTYPE_EXPORT)?"Exporting":
                  ((mpi->flags&MP_IMGFLAG_DIRECT)?"Direct Rendering":"Allocating"),
                  (mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?" (slices)":"",
                  mpi->width,mpi->height,mpi->bpp,
                  (mpi->flags&MP_IMGFLAG_YUV)?"YUV":((mpi->flags&MP_IMGFLAG_SWAPPED)?"BGR":"RGB"),
                  (mpi->flags&MP_IMGFLAG_PLANAR)?"planar":"packed",
                  mpi->bpp*mpi->width*mpi->height/8);
            ff_mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\n",
                  mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2],
                  mpi->stride[0], mpi->stride[1], mpi->stride[2],
                  mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift);
            mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED;
        }

        mpi->qscale = NULL;
        mpi->usage_count++;
    }
//    printf("\rVF_MPI: %p %p %p %d %d %d    \n",
//    mpi->planes[0],mpi->planes[1],mpi->planes[2],
//    mpi->stride[0],mpi->stride[1],mpi->stride[2]);
    return mpi;
}
| 460 | |
| 461 | int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){ |
| 462 | MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf)); |
| 463 | AVFilterLink *outlink = m->avfctx->outputs[0]; |
| 464 | AVFrame *picref = av_frame_alloc(); |
| 465 | int i; |
| 466 | |
| 467 | av_assert0(vf->next); |
| 468 | |
| 469 | av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n"); |
| 470 | |
| 471 | if (!picref) |
| 472 | goto fail; |
| 473 | |
| 474 | picref->width = mpi->w; |
| 475 | picref->height = mpi->h; |
| 476 | |
| 477 | for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++); |
| 478 | picref->format = conversion_map[i].pix_fmt; |
| 479 | |
| 480 | for(i=0; conversion_map[i].fmt && m->in_pix_fmt != conversion_map[i].pix_fmt; i++); |
| 481 | if (mpi->imgfmt == conversion_map[i].fmt) |
| 482 | picref->format = conversion_map[i].pix_fmt; |
| 483 | |
| 484 | memcpy(picref->linesize, mpi->stride, FFMIN(sizeof(picref->linesize), sizeof(mpi->stride))); |
| 485 | |
| 486 | for(i=0; i<4 && mpi->stride[i]; i++){ |
| 487 | picref->data[i] = mpi->planes[i]; |
| 488 | } |
| 489 | |
| 490 | if(pts != MP_NOPTS_VALUE) |
| 491 | picref->pts= pts * av_q2d(outlink->time_base); |
| 492 | |
| 493 | if(1) { // mp buffers are currently unsupported in libavfilter, we thus must copy |
| 494 | AVFrame *tofree = picref; |
| 495 | picref = av_frame_clone(picref); |
| 496 | av_frame_free(&tofree); |
| 497 | } |
| 498 | |
| 499 | ff_filter_frame(outlink, picref); |
| 500 | m->frame_returned++; |
| 501 | |
| 502 | return 1; |
| 503 | fail: |
| 504 | av_frame_free(&picref); |
| 505 | return 0; |
| 506 | } |
| 507 | |
/**
 * config of the dummy next instance: just record the negotiated geometry on
 * next_vf; config_outprops() reads it back for the output link.
 * The #if 0 region below preserves the original MPlayer negotiation logic
 * (it references symbols not declared here and is not compiled).
 *
 * @return 1 on success (vf.c convention: <=0 means failure)
 */
int ff_vf_next_config(struct vf_instance *vf,
        int width, int height, int d_width, int d_height,
        unsigned int voflags, unsigned int outfmt){

    av_assert0(width>0 && height>0);
    vf->next->w = width; vf->next->h = height;

    return 1;
#if 0
    int flags=vf->next->query_format(vf->next,outfmt);
    if(!flags){
        // hmm. colorspace mismatch!!!
        //this is fatal for us ATM
        return 0;
    }
    ff_mp_msg(MSGT_VFILTER,MSGL_V,"REQ: flags=0x%X  req=0x%X  \n",flags,vf->default_reqs);
    miss=vf->default_reqs - (flags&vf->default_reqs);
    if(miss&VFCAP_ACCEPT_STRIDE){
        // vf requires stride support but vf->next doesn't support it!
        // let's insert the 'expand' filter, it does the job for us:
        vf_instance_t* vf2=vf_open_filter(vf->next,"expand",NULL);
        if(!vf2) return 0; // shouldn't happen!
        vf->next=vf2;
    }
    vf->next->w = width; vf->next->h = height;
    return 1;
#endif
}
| 536 | |
| 537 | int ff_vf_next_control(struct vf_instance *vf, int request, void* data){ |
| 538 | MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf)); |
| 539 | av_log(m->avfctx, AV_LOG_DEBUG, "Received control %d\n", request); |
| 540 | return 0; |
| 541 | } |
| 542 | |
| 543 | static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){ |
| 544 | MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf)); |
| 545 | int i; |
| 546 | av_log(m->avfctx, AV_LOG_DEBUG, "query %X\n", fmt); |
| 547 | |
| 548 | for(i=0; conversion_map[i].fmt; i++){ |
| 549 | if(fmt==conversion_map[i].fmt) |
| 550 | return 1; //we suport all |
| 551 | } |
| 552 | return 0; |
| 553 | } |
| 554 | |
| 555 | |
/**
 * init callback: publish the host CPU capabilities to libmpcodecs, parse the
 * "filter" option as "name[=args]", look the name up in the filters[] table
 * and open the wrapped MPlayer filter instance.
 *
 * @return 0 on success, AVERROR(EINVAL) on bad option / unknown filter,
 *         -1 if the filter's vf_open() fails
 */
static av_cold int init(AVFilterContext *ctx)
{
    MPContext *m = ctx->priv;
    int cpu_flags = av_get_cpu_flags();
    char name[256];
    const char *args;
    int i;

    // expose the detected x86 SIMD capabilities to the libmpcodecs code
    ff_gCpuCaps.hasMMX      = cpu_flags & AV_CPU_FLAG_MMX;
    ff_gCpuCaps.hasMMX2     = cpu_flags & AV_CPU_FLAG_MMX2;
    ff_gCpuCaps.hasSSE      = cpu_flags & AV_CPU_FLAG_SSE;
    ff_gCpuCaps.hasSSE2     = cpu_flags & AV_CPU_FLAG_SSE2;
    ff_gCpuCaps.hasSSE3     = cpu_flags & AV_CPU_FLAG_SSE3;
    ff_gCpuCaps.hasSSSE3    = cpu_flags & AV_CPU_FLAG_SSSE3;
    ff_gCpuCaps.hasSSE4     = cpu_flags & AV_CPU_FLAG_SSE4;
    ff_gCpuCaps.hasSSE42    = cpu_flags & AV_CPU_FLAG_SSE42;
    ff_gCpuCaps.hasAVX      = cpu_flags & AV_CPU_FLAG_AVX;
    ff_gCpuCaps.has3DNow    = cpu_flags & AV_CPU_FLAG_3DNOW;
    ff_gCpuCaps.has3DNowExt = cpu_flags & AV_CPU_FLAG_3DNOWEXT;

    m->avfctx= ctx;

    // split "name[=args]" / "name[:args]": name is everything before ':' or '='
    args = m->filter;
    if(!args || 1!=sscanf(args, "%255[^:=]", name)){
        av_log(ctx, AV_LOG_ERROR, "Invalid parameter.\n");
        return AVERROR(EINVAL);
    }
    args += strlen(name);
    if (args[0] == '=')
        args++;

    // find the wrapped filter by name
    for(i=0; ;i++){
        if(!filters[i] || !strcmp(name, filters[i]->name))
            break;
    }

    if(!filters[i]){
        av_log(ctx, AV_LOG_ERROR, "Unknown filter %s\n", name);
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_WARNING,
           "'%s' is a wrapped MPlayer filter (libmpcodecs). This filter may be removed\n"
           "once it has been ported to a native libavfilter.\n", name);

    // build the two-element chain: m->vf (the real filter) -> m->next_vf (dummy)
    memset(&m->vf,0,sizeof(m->vf));
    m->vf.info= filters[i];

    m->vf.next        = &m->next_vf;
    m->vf.put_image   = ff_vf_next_put_image;
    m->vf.config      = ff_vf_next_config;
    m->vf.query_format= vf_default_query_format;
    m->vf.control     = ff_vf_next_control;
    m->vf.default_caps=VFCAP_ACCEPT_STRIDE;
    m->vf.default_reqs=0;
    if(m->vf.info->opts)
        av_log(ctx, AV_LOG_ERROR, "opts / m_struct_set is unsupported\n");
#if 0
    if(vf->info->opts) { // vf_vo get some special argument
      const m_struct_t* st = vf->info->opts;
      void* vf_priv = m_struct_alloc(st);
      int n;
      for(n = 0 ; args && args[2*n] ; n++)
          m_struct_set(st,vf_priv,args[2*n],args[2*n+1]);
      vf->priv = vf_priv;
      args = NULL;
    } else // Otherwise we should have the '_oldargs_'
    if(args && !strcmp(args[0],"_oldargs_"))
        args = (char**)args[1];
    else
        args = NULL;
#endif
    if(m->vf.info->vf_open(&m->vf, (char*)args)<=0){
        av_log(ctx, AV_LOG_ERROR, "vf_open() of %s with arg=%s failed\n", name, args);
        return -1;
    }

    return 0;
}
| 635 | |
| 636 | static av_cold void uninit(AVFilterContext *ctx) |
| 637 | { |
| 638 | MPContext *m = ctx->priv; |
| 639 | vf_instance_t *vf = &m->vf; |
| 640 | |
| 641 | while(vf){ |
| 642 | vf_instance_t *next = vf->next; |
| 643 | if(vf->uninit) |
| 644 | vf->uninit(vf); |
| 645 | ff_free_mp_image(vf->imgctx.static_images[0]); |
| 646 | ff_free_mp_image(vf->imgctx.static_images[1]); |
| 647 | ff_free_mp_image(vf->imgctx.temp_images[0]); |
| 648 | ff_free_mp_image(vf->imgctx.export_images[0]); |
| 649 | vf = next; |
| 650 | } |
| 651 | } |
| 652 | |
| 653 | static int query_formats(AVFilterContext *ctx) |
| 654 | { |
| 655 | AVFilterFormats *avfmts=NULL; |
| 656 | MPContext *m = ctx->priv; |
| 657 | enum AVPixelFormat lastpixfmt = AV_PIX_FMT_NONE; |
| 658 | int i; |
| 659 | |
| 660 | for(i=0; conversion_map[i].fmt; i++){ |
| 661 | av_log(ctx, AV_LOG_DEBUG, "query: %X\n", conversion_map[i].fmt); |
| 662 | if(m->vf.query_format(&m->vf, conversion_map[i].fmt)){ |
| 663 | av_log(ctx, AV_LOG_DEBUG, "supported,adding\n"); |
| 664 | if (conversion_map[i].pix_fmt != lastpixfmt) { |
| 665 | ff_add_format(&avfmts, conversion_map[i].pix_fmt); |
| 666 | lastpixfmt = conversion_map[i].pix_fmt; |
| 667 | } |
| 668 | } |
| 669 | } |
| 670 | |
| 671 | if (!avfmts) |
| 672 | return -1; |
| 673 | |
| 674 | //We assume all allowed input formats are also allowed output formats |
| 675 | ff_set_common_formats(ctx, avfmts); |
| 676 | return 0; |
| 677 | } |
| 678 | |
| 679 | static int config_inprops(AVFilterLink *inlink) |
| 680 | { |
| 681 | MPContext *m = inlink->dst->priv; |
| 682 | int i; |
| 683 | for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++); |
| 684 | |
| 685 | av_assert0(conversion_map[i].fmt && inlink->w && inlink->h); |
| 686 | |
| 687 | m->vf.fmt.have_configured = 1; |
| 688 | m->vf.fmt.orig_height = inlink->h; |
| 689 | m->vf.fmt.orig_width = inlink->w; |
| 690 | m->vf.fmt.orig_fmt = conversion_map[i].fmt; |
| 691 | |
| 692 | if(m->vf.config(&m->vf, inlink->w, inlink->h, inlink->w, inlink->h, 0, conversion_map[i].fmt)<=0) |
| 693 | return -1; |
| 694 | |
| 695 | return 0; |
| 696 | } |
| 697 | |
| 698 | static int config_outprops(AVFilterLink *outlink) |
| 699 | { |
| 700 | MPContext *m = outlink->src->priv; |
| 701 | |
| 702 | outlink->w = m->next_vf.w; |
| 703 | outlink->h = m->next_vf.h; |
| 704 | |
| 705 | return 0; |
| 706 | } |
| 707 | |
| 708 | static int request_frame(AVFilterLink *outlink) |
| 709 | { |
| 710 | MPContext *m = outlink->src->priv; |
| 711 | int ret; |
| 712 | |
| 713 | av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame\n"); |
| 714 | |
| 715 | for(m->frame_returned=0; !m->frame_returned;){ |
| 716 | ret=ff_request_frame(outlink->src->inputs[0]); |
| 717 | if(ret<0) |
| 718 | break; |
| 719 | } |
| 720 | |
| 721 | av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame ret=%d\n", ret); |
| 722 | return ret; |
| 723 | } |
| 724 | |
/**
 * Input filter_frame: wrap the incoming AVFrame into an mp_image_t — plane
 * and stride pointers are shared, not copied — and hand it to the wrapped
 * filter's put_image().
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
    MPContext *m = inlink->dst->priv;
    int i;
    double pts= MP_NOPTS_VALUE;
    mp_image_t* mpi = ff_new_mp_image(inpic->width, inpic->height);

    // NOTE(review): dividing by av_q2d() mirrors the multiplication in
    // ff_vf_next_put_image(), so the round trip is self-consistent, but it is
    // the inverse of the usual ticks->seconds conversion; confirm intent.
    if(inpic->pts != AV_NOPTS_VALUE)
        pts= inpic->pts / av_q2d(inlink->time_base);

    // translate the link's AVPixelFormat to the matching IMGFMT
    for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
    ff_mp_image_setfmt(mpi,conversion_map[i].fmt);
    m->in_pix_fmt = inlink->format; // remembered for the reverse mapping on output

    memcpy(mpi->planes, inpic->data, FFMIN(sizeof(inpic->data) , sizeof(mpi->planes)));
    memcpy(mpi->stride, inpic->linesize, FFMIN(sizeof(inpic->linesize), sizeof(mpi->stride)));

    if (inpic->interlaced_frame)
        mpi->fields |= MP_IMGFIELD_INTERLACED;
    if (inpic->top_field_first)
        mpi->fields |= MP_IMGFIELD_TOP_FIRST;
    if (inpic->repeat_pict)
        mpi->fields |= MP_IMGFIELD_REPEAT_FIRST;

    // mpi->flags|=MP_IMGFLAG_ALLOCATED; ?
    mpi->flags |= MP_IMGFLAG_READABLE;
    if(!av_frame_is_writable(inpic))
        mpi->flags |= MP_IMGFLAG_PRESERVE;
    if(m->vf.put_image(&m->vf, mpi, pts) == 0){
        av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n");
        // NOTE(review): inpic is not freed on this path — looks like a leak
        // unless the wrapped filter retained the planes; verify before changing.
    }else{
        av_frame_free(&inpic);
    }
    ff_free_mp_image(mpi);
    return 0;
}
| 761 | |
/* Single video input pad; frames enter through filter_frame(). */
static const AVFilterPad mp_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_inprops,
    },
    { NULL }
};
| 771 | |
/* Single video output pad; downstream pulls via request_frame(). */
static const AVFilterPad mp_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_outprops,
    },
    { NULL }
};
| 781 | |
/* Filter definition for the "mp" libmpcodecs wrapper. */
AVFilter ff_vf_mp = {
    .name          = "mp",
    .description   = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(MPContext),
    .query_formats = query_formats,
    .inputs        = mp_inputs,
    .outputs       = mp_outputs,
    .priv_class    = &mp_class,
};