| 1 | /* |
| 2 | * - CrystalHD decoder module - |
| 3 | * |
| 4 | * Copyright(C) 2010,2011 Philip Langdale <ffmpeg.philipl@overt.org> |
| 5 | * |
| 6 | * This file is part of FFmpeg. |
| 7 | * |
| 8 | * FFmpeg is free software; you can redistribute it and/or |
| 9 | * modify it under the terms of the GNU Lesser General Public |
| 10 | * License as published by the Free Software Foundation; either |
| 11 | * version 2.1 of the License, or (at your option) any later version. |
| 12 | * |
| 13 | * FFmpeg is distributed in the hope that it will be useful, |
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 16 | * Lesser General Public License for more details. |
| 17 | * |
| 18 | * You should have received a copy of the GNU Lesser General Public |
| 19 | * License along with FFmpeg; if not, write to the Free Software |
| 20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| 21 | */ |
| 22 | |
| 23 | /* |
| 24 | * - Principles of Operation - |
| 25 | * |
| 26 | * The CrystalHD decoder operates at the bitstream level - which is an even |
| 27 | * higher level than the decoding hardware you typically see in modern GPUs. |
| 28 | * This means it has a very simple interface, in principle. You feed demuxed |
 * packets in one end and get decoded pictures (fields/frames) out the other.
| 30 | * |
| 31 | * Of course, nothing is ever that simple. Due, at the very least, to b-frame |
| 32 | * dependencies in the supported formats, the hardware has a delay between |
 * when a packet goes in and when a picture comes out. Furthermore, this delay
 * is not just a function of time: the decoder may also need additional frames
 * fed in before it can emit a picture, to satisfy those b-frame dependencies.
| 36 | * |
| 37 | * As such, a pipeline will build up that is roughly equivalent to the required |
| 38 | * DPB for the file being played. If that was all it took, things would still |
| 39 | * be simple - so, of course, it isn't. |
| 40 | * |
 * The hardware has a way of indicating that a picture is ready to be copied out,
 * but this is unreliable - sometimes the copy attempt will still fail. Based
 * on testing, the code therefore waits until 3 pictures are ready before it
 * starts copying out, which has the effect of extending the pipeline.
| 45 | * |
 * Furthermore, while it is tempting to say that once the decoder starts outputting
| 47 | * frames, the software should never fail to return a frame from a decode(), |
| 48 | * this is a hard assertion to make, because the stream may switch between |
| 49 | * differently encoded content (number of b-frames, interlacing, etc) which |
| 50 | * might require a longer pipeline than before. If that happened, you could |
| 51 | * deadlock trying to retrieve a frame that can't be decoded without feeding |
| 52 | * in additional packets. |
| 53 | * |
| 54 | * As such, the code will return in the event that a picture cannot be copied |
 * out, leading to an increase in the length of the pipeline. This, in turn,
 * means we have to be sensitive to the time it takes to decode a picture;
 * we do not want to give up just because the hardware needed a little more
 * time to prepare the picture! For this reason, there are delays included
| 59 | * in the decode() path that ensure that, under normal conditions, the hardware |
| 60 | * will only fail to return a frame if it really needs additional packets to |
| 61 | * complete the decoding. |
| 62 | * |
| 63 | * Finally, to be explicit, we do not want the pipeline to grow without bound |
| 64 | * for two reasons: 1) The hardware can only buffer a finite number of packets, |
| 65 | * and 2) The client application may not be able to cope with arbitrarily long |
 * delays in the video path relative to the audio path. For example, MPlayer
| 67 | * can only handle a 20 picture delay (although this is arbitrary, and needs |
| 68 | * to be extended to fully support the CrystalHD where the delay could be up |
| 69 | * to 32 pictures - consider PAFF H.264 content with 16 b-frames). |
| 70 | */ |
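
/*
 * As a rough sketch (not an exact transcription of the code below), a single
 * decode() call amounts to:
 *
 *     DtsProcInput(dev, data, len, fake_pts, 0);       // feed one demuxed packet
 *     DtsGetDriverStatus(dev, &decoder_status);        // how many pictures are ready?
 *     if (decoder_status.ReadyListCount > 0)
 *         DtsProcOutputNoCopy(dev, timeout, &output);  // copy out at most one picture
 *
 * with the waits, pipeline accounting and field handling described above
 * layered on top.
 */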
| 71 | |
| 72 | /***************************************************************************** |
| 73 | * Includes |
| 74 | ****************************************************************************/ |
| 75 | |
| 76 | #define _XOPEN_SOURCE 600 |
| 77 | #include <inttypes.h> |
| 78 | #include <stdio.h> |
| 79 | #include <stdlib.h> |
| 80 | |
| 81 | #include <libcrystalhd/bc_dts_types.h> |
| 82 | #include <libcrystalhd/bc_dts_defs.h> |
| 83 | #include <libcrystalhd/libcrystalhd_if.h> |
| 84 | |
| 85 | #include "avcodec.h" |
| 86 | #include "h264.h" |
| 87 | #include "internal.h" |
| 88 | #include "libavutil/imgutils.h" |
| 89 | #include "libavutil/intreadwrite.h" |
| 90 | #include "libavutil/opt.h" |
| 91 | |
| 92 | #if HAVE_UNISTD_H |
| 93 | #include <unistd.h> |
| 94 | #endif |
| 95 | |
/** Timeout parameter passed to DtsProcOutputNoCopy() in us */
| 97 | #define OUTPUT_PROC_TIMEOUT 50 |
| 98 | /** Step between fake timestamps passed to hardware in units of 100ns */ |
| 99 | #define TIMESTAMP_UNIT 100000 |
| 100 | /** Initial value in us of the wait in decode() */ |
| 101 | #define BASE_WAIT 10000 |
| 102 | /** Increment in us to adjust wait in decode() */ |
| 103 | #define WAIT_UNIT 1000 |
| 104 | |
| 105 | |
| 106 | /***************************************************************************** |
| 107 | * Module private data |
| 108 | ****************************************************************************/ |
| 109 | |
| 110 | typedef enum { |
| 111 | RET_ERROR = -1, |
| 112 | RET_OK = 0, |
| 113 | RET_COPY_AGAIN = 1, |
| 114 | RET_SKIP_NEXT_COPY = 2, |
| 115 | RET_COPY_NEXT_FIELD = 3, |
| 116 | } CopyRet; |
| 117 | |
| 118 | typedef struct OpaqueList { |
| 119 | struct OpaqueList *next; |
| 120 | uint64_t fake_timestamp; |
| 121 | uint64_t reordered_opaque; |
| 122 | uint8_t pic_type; |
| 123 | } OpaqueList; |
| 124 | |
| 125 | typedef struct { |
| 126 | AVClass *av_class; |
| 127 | AVCodecContext *avctx; |
| 128 | AVFrame *pic; |
| 129 | HANDLE dev; |
| 130 | |
| 131 | uint8_t *orig_extradata; |
| 132 | uint32_t orig_extradata_size; |
| 133 | |
| 134 | AVBitStreamFilterContext *bsfc; |
| 135 | AVCodecParserContext *parser; |
| 136 | |
| 137 | uint8_t is_70012; |
| 138 | uint8_t *sps_pps_buf; |
| 139 | uint32_t sps_pps_size; |
| 140 | uint8_t is_nal; |
| 141 | uint8_t output_ready; |
| 142 | uint8_t need_second_field; |
| 143 | uint8_t skip_next_output; |
| 144 | uint64_t decode_wait; |
| 145 | |
| 146 | uint64_t last_picture; |
| 147 | |
| 148 | OpaqueList *head; |
| 149 | OpaqueList *tail; |
| 150 | |
| 151 | /* Options */ |
| 152 | uint32_t sWidth; |
| 153 | uint8_t bframe_bug; |
| 154 | } CHDContext; |
| 155 | |
| 156 | static const AVOption options[] = { |
| 157 | { "crystalhd_downscale_width", |
| 158 | "Turn on downscaling to the specified width", |
| 159 | offsetof(CHDContext, sWidth), |
| 160 | AV_OPT_TYPE_INT, {.i64 = 0}, 0, UINT32_MAX, |
| 161 | AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, }, |
| 162 | { NULL, }, |
| 163 | }; |
| 164 | |
| 165 | |
| 166 | /***************************************************************************** |
| 167 | * Helper functions |
| 168 | ****************************************************************************/ |
| 169 | |
| 170 | static inline BC_MEDIA_SUBTYPE id2subtype(CHDContext *priv, enum AVCodecID id) |
| 171 | { |
| 172 | switch (id) { |
| 173 | case AV_CODEC_ID_MPEG4: |
| 174 | return BC_MSUBTYPE_DIVX; |
| 175 | case AV_CODEC_ID_MSMPEG4V3: |
| 176 | return BC_MSUBTYPE_DIVX311; |
| 177 | case AV_CODEC_ID_MPEG2VIDEO: |
| 178 | return BC_MSUBTYPE_MPEG2VIDEO; |
| 179 | case AV_CODEC_ID_VC1: |
| 180 | return BC_MSUBTYPE_VC1; |
| 181 | case AV_CODEC_ID_WMV3: |
| 182 | return BC_MSUBTYPE_WMV3; |
| 183 | case AV_CODEC_ID_H264: |
| 184 | return priv->is_nal ? BC_MSUBTYPE_AVC1 : BC_MSUBTYPE_H264; |
| 185 | default: |
| 186 | return BC_MSUBTYPE_INVALID; |
| 187 | } |
| 188 | } |
| 189 | |
| 190 | static inline void print_frame_info(CHDContext *priv, BC_DTS_PROC_OUT *output) |
| 191 | { |
| 192 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffSz: %u\n", output->YbuffSz); |
| 193 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffDoneSz: %u\n", |
| 194 | output->YBuffDoneSz); |
| 195 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tUVBuffDoneSz: %u\n", |
| 196 | output->UVBuffDoneSz); |
| 197 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tTimestamp: %"PRIu64"\n", |
| 198 | output->PicInfo.timeStamp); |
| 199 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tPicture Number: %u\n", |
| 200 | output->PicInfo.picture_number); |
| 201 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tWidth: %u\n", |
| 202 | output->PicInfo.width); |
| 203 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tHeight: %u\n", |
| 204 | output->PicInfo.height); |
| 205 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tChroma: 0x%03x\n", |
| 206 | output->PicInfo.chroma_format); |
| 207 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tPulldown: %u\n", |
| 208 | output->PicInfo.pulldown); |
| 209 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tFlags: 0x%08x\n", |
| 210 | output->PicInfo.flags); |
| 211 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrame Rate/Res: %u\n", |
| 212 | output->PicInfo.frame_rate); |
| 213 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tAspect Ratio: %u\n", |
| 214 | output->PicInfo.aspect_ratio); |
| 215 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tColor Primaries: %u\n", |
| 216 | output->PicInfo.colour_primaries); |
| 217 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tMetaData: %u\n", |
| 218 | output->PicInfo.picture_meta_payload); |
| 219 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tSession Number: %u\n", |
| 220 | output->PicInfo.sess_num); |
| 221 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tycom: %u\n", |
| 222 | output->PicInfo.ycom); |
| 223 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tCustom Aspect: %u\n", |
| 224 | output->PicInfo.custom_aspect_ratio_width_height); |
| 225 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrames to Drop: %u\n", |
| 226 | output->PicInfo.n_drop); |
| 227 | av_log(priv->avctx, AV_LOG_VERBOSE, "\tH264 Valid Fields: 0x%08x\n", |
| 228 | output->PicInfo.other.h264.valid); |
| 229 | } |
| 230 | |
| 231 | |
| 232 | /***************************************************************************** |
| 233 | * OpaqueList functions |
| 234 | ****************************************************************************/ |
| 235 | |
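/*
 * Assign a fake timestamp to an input packet and remember its
 * reordered_opaque and picture type against that timestamp. Fake timestamps
 * start at TIMESTAMP_UNIT and increase by TIMESTAMP_UNIT per pushed element,
 * so they are never 0 and a return value of 0 unambiguously signals an
 * allocation failure.
 */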
| 236 | static uint64_t opaque_list_push(CHDContext *priv, uint64_t reordered_opaque, |
| 237 | uint8_t pic_type) |
| 238 | { |
| 239 | OpaqueList *newNode = av_mallocz(sizeof (OpaqueList)); |
| 240 | if (!newNode) { |
| 241 | av_log(priv->avctx, AV_LOG_ERROR, |
| 242 | "Unable to allocate new node in OpaqueList.\n"); |
| 243 | return 0; |
| 244 | } |
| 245 | if (!priv->head) { |
| 246 | newNode->fake_timestamp = TIMESTAMP_UNIT; |
| 247 | priv->head = newNode; |
| 248 | } else { |
| 249 | newNode->fake_timestamp = priv->tail->fake_timestamp + TIMESTAMP_UNIT; |
| 250 | priv->tail->next = newNode; |
| 251 | } |
| 252 | priv->tail = newNode; |
| 253 | newNode->reordered_opaque = reordered_opaque; |
| 254 | newNode->pic_type = pic_type; |
| 255 | |
| 256 | return newNode->fake_timestamp; |
| 257 | } |
| 258 | |
| 259 | /* |
| 260 | * The OpaqueList is built in decode order, while elements will be removed |
| 261 | * in presentation order. If frames are reordered, this means we must be |
| 262 | * able to remove elements that are not the first element. |
| 263 | * |
| 264 | * Returned node must be freed by caller. |
| 265 | */ |
| 266 | static OpaqueList *opaque_list_pop(CHDContext *priv, uint64_t fake_timestamp) |
| 267 | { |
| 268 | OpaqueList *node = priv->head; |
| 269 | |
| 270 | if (!priv->head) { |
| 271 | av_log(priv->avctx, AV_LOG_ERROR, |
| 272 | "CrystalHD: Attempted to query non-existent timestamps.\n"); |
| 273 | return NULL; |
| 274 | } |
| 275 | |
| 276 | /* |
| 277 | * The first element is special-cased because we have to manipulate |
| 278 | * the head pointer rather than the previous element in the list. |
| 279 | */ |
| 280 | if (priv->head->fake_timestamp == fake_timestamp) { |
| 281 | priv->head = node->next; |
| 282 | |
        if (!priv->head)
            priv->tail = NULL;
| 285 | |
| 286 | node->next = NULL; |
| 287 | return node; |
| 288 | } |
| 289 | |
| 290 | /* |
| 291 | * The list is processed at arm's length so that we have the |
| 292 | * previous element available to rewrite its next pointer. |
| 293 | */ |
| 294 | while (node->next) { |
| 295 | OpaqueList *current = node->next; |
| 296 | if (current->fake_timestamp == fake_timestamp) { |
| 297 | node->next = current->next; |
| 298 | |
| 299 | if (!node->next) |
| 300 | priv->tail = node; |
| 301 | |
| 302 | current->next = NULL; |
| 303 | return current; |
| 304 | } else { |
| 305 | node = current; |
| 306 | } |
| 307 | } |
| 308 | |
| 309 | av_log(priv->avctx, AV_LOG_VERBOSE, |
| 310 | "CrystalHD: Couldn't match fake_timestamp.\n"); |
| 311 | return NULL; |
| 312 | } |
| 313 | |
| 314 | |
| 315 | /***************************************************************************** |
| 316 | * Video decoder API function definitions |
| 317 | ****************************************************************************/ |
| 318 | |
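/*
 * Reset the pipeline tracking state and drop everything buffered in both
 * software and hardware so that decoding can restart cleanly.
 */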
| 319 | static void flush(AVCodecContext *avctx) |
| 320 | { |
| 321 | CHDContext *priv = avctx->priv_data; |
| 322 | |
| 323 | avctx->has_b_frames = 0; |
| 324 | priv->last_picture = -1; |
| 325 | priv->output_ready = 0; |
| 326 | priv->need_second_field = 0; |
| 327 | priv->skip_next_output = 0; |
| 328 | priv->decode_wait = BASE_WAIT; |
| 329 | |
    av_frame_unref(priv->pic);
| 331 | |
| 332 | /* Flush mode 4 flushes all software and hardware buffers. */ |
| 333 | DtsFlushInput(priv->dev, 4); |
| 334 | } |
| 335 | |
| 336 | |
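/*
 * Shut down the hardware decoder, restore the original (unfiltered)
 * extradata so a later re-init behaves as expected, and free all private
 * state, including any remaining OpaqueList entries.
 */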
| 337 | static av_cold int uninit(AVCodecContext *avctx) |
| 338 | { |
| 339 | CHDContext *priv = avctx->priv_data; |
| 340 | HANDLE device; |
| 341 | |
| 342 | device = priv->dev; |
| 343 | DtsStopDecoder(device); |
| 344 | DtsCloseDecoder(device); |
| 345 | DtsDeviceClose(device); |
| 346 | |
| 347 | /* |
| 348 | * Restore original extradata, so that if the decoder is |
| 349 | * reinitialised, the bitstream detection and filtering |
| 350 | * will work as expected. |
| 351 | */ |
| 352 | if (priv->orig_extradata) { |
| 353 | av_free(avctx->extradata); |
| 354 | avctx->extradata = priv->orig_extradata; |
| 355 | avctx->extradata_size = priv->orig_extradata_size; |
| 356 | priv->orig_extradata = NULL; |
| 357 | priv->orig_extradata_size = 0; |
| 358 | } |
| 359 | |
| 360 | av_parser_close(priv->parser); |
| 361 | if (priv->bsfc) { |
| 362 | av_bitstream_filter_close(priv->bsfc); |
| 363 | } |
| 364 | |
| 365 | av_freep(&priv->sps_pps_buf); |
| 366 | |
    av_frame_free(&priv->pic);
| 368 | |
| 369 | if (priv->head) { |
| 370 | OpaqueList *node = priv->head; |
| 371 | while (node) { |
| 372 | OpaqueList *next = node->next; |
| 373 | av_free(node); |
| 374 | node = next; |
| 375 | } |
| 376 | } |
| 377 | |
| 378 | return 0; |
| 379 | } |
| 380 | |
| 381 | |
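/*
 * Open the device, describe the input bitstream to it (converting AVC1
 * extradata to Annex B via the h264_mp4toannexb filter where necessary),
 * and start the decoder and capture engines.
 */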
| 382 | static av_cold int init(AVCodecContext *avctx) |
| 383 | { |
| 384 | CHDContext* priv; |
| 385 | BC_STATUS ret; |
| 386 | BC_INFO_CRYSTAL version; |
| 387 | BC_INPUT_FORMAT format = { |
| 388 | .FGTEnable = FALSE, |
| 389 | .Progressive = TRUE, |
| 390 | .OptFlags = 0x80000000 | vdecFrameRate59_94 | 0x40, |
| 391 | .width = avctx->width, |
| 392 | .height = avctx->height, |
| 393 | }; |
| 394 | |
| 395 | BC_MEDIA_SUBTYPE subtype; |
| 396 | |
| 397 | uint32_t mode = DTS_PLAYBACK_MODE | |
| 398 | DTS_LOAD_FILE_PLAY_FW | |
| 399 | DTS_SKIP_TX_CHK_CPB | |
| 400 | DTS_PLAYBACK_DROP_RPT_MODE | |
| 401 | DTS_SINGLE_THREADED_MODE | |
| 402 | DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976); |
| 403 | |
| 404 | av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n", |
| 405 | avctx->codec->name); |
| 406 | |
| 407 | avctx->pix_fmt = AV_PIX_FMT_YUYV422; |
| 408 | |
| 409 | /* Initialize the library */ |
| 410 | priv = avctx->priv_data; |
| 411 | priv->avctx = avctx; |
| 412 | priv->is_nal = avctx->extradata_size > 0 && *(avctx->extradata) == 1; |
| 413 | priv->last_picture = -1; |
| 414 | priv->decode_wait = BASE_WAIT; |
    priv->pic = av_frame_alloc();
    if (!priv->pic)
        return AVERROR(ENOMEM);
| 416 | |
| 417 | subtype = id2subtype(priv, avctx->codec->id); |
| 418 | switch (subtype) { |
| 419 | case BC_MSUBTYPE_AVC1: |
| 420 | { |
| 421 | uint8_t *dummy_p; |
| 422 | int dummy_int; |
| 423 | |
| 424 | /* Back up the extradata so it can be restored at close time. */ |
| 425 | priv->orig_extradata = av_malloc(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); |
| 426 | if (!priv->orig_extradata) { |
| 427 | av_log(avctx, AV_LOG_ERROR, |
| 428 | "Failed to allocate copy of extradata\n"); |
| 429 | return AVERROR(ENOMEM); |
| 430 | } |
| 431 | priv->orig_extradata_size = avctx->extradata_size; |
| 432 | memcpy(priv->orig_extradata, avctx->extradata, avctx->extradata_size); |
| 433 | |
| 434 | priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb"); |
| 435 | if (!priv->bsfc) { |
| 436 | av_log(avctx, AV_LOG_ERROR, |
| 437 | "Cannot open the h264_mp4toannexb BSF!\n"); |
| 438 | return AVERROR_BSF_NOT_FOUND; |
| 439 | } |
| 440 | av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p, |
| 441 | &dummy_int, NULL, 0, 0); |
| 442 | } |
| 443 | subtype = BC_MSUBTYPE_H264; |
| 444 | // Fall-through |
| 445 | case BC_MSUBTYPE_H264: |
| 446 | format.startCodeSz = 4; |
| 447 | // Fall-through |
| 448 | case BC_MSUBTYPE_VC1: |
| 449 | case BC_MSUBTYPE_WVC1: |
| 450 | case BC_MSUBTYPE_WMV3: |
| 451 | case BC_MSUBTYPE_WMVA: |
| 452 | case BC_MSUBTYPE_MPEG2VIDEO: |
| 453 | case BC_MSUBTYPE_DIVX: |
| 454 | case BC_MSUBTYPE_DIVX311: |
| 455 | format.pMetaData = avctx->extradata; |
| 456 | format.metaDataSz = avctx->extradata_size; |
| 457 | break; |
| 458 | default: |
| 459 | av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n"); |
| 460 | return AVERROR(EINVAL); |
| 461 | } |
| 462 | format.mSubtype = subtype; |
| 463 | |
| 464 | if (priv->sWidth) { |
| 465 | format.bEnableScaling = 1; |
| 466 | format.ScalingParams.sWidth = priv->sWidth; |
| 467 | } |
| 468 | |
| 469 | /* Get a decoder instance */ |
| 470 | av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n"); |
| 471 | // Initialize the Link and Decoder devices |
| 472 | ret = DtsDeviceOpen(&priv->dev, mode); |
| 473 | if (ret != BC_STS_SUCCESS) { |
| 474 | av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n"); |
| 475 | goto fail; |
| 476 | } |
| 477 | |
| 478 | ret = DtsCrystalHDVersion(priv->dev, &version); |
| 479 | if (ret != BC_STS_SUCCESS) { |
| 480 | av_log(avctx, AV_LOG_VERBOSE, |
| 481 | "CrystalHD: DtsCrystalHDVersion failed\n"); |
| 482 | goto fail; |
| 483 | } |
| 484 | priv->is_70012 = version.device == 0; |
| 485 | |
| 486 | if (priv->is_70012 && |
| 487 | (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) { |
| 488 | av_log(avctx, AV_LOG_VERBOSE, |
| 489 | "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n"); |
| 490 | goto fail; |
| 491 | } |
| 492 | |
| 493 | ret = DtsSetInputFormat(priv->dev, &format); |
| 494 | if (ret != BC_STS_SUCCESS) { |
| 495 | av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n"); |
| 496 | goto fail; |
| 497 | } |
| 498 | |
| 499 | ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES); |
| 500 | if (ret != BC_STS_SUCCESS) { |
| 501 | av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n"); |
| 502 | goto fail; |
| 503 | } |
| 504 | |
| 505 | ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2); |
| 506 | if (ret != BC_STS_SUCCESS) { |
| 507 | av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n"); |
| 508 | goto fail; |
| 509 | } |
| 510 | ret = DtsStartDecoder(priv->dev); |
| 511 | if (ret != BC_STS_SUCCESS) { |
| 512 | av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n"); |
| 513 | goto fail; |
| 514 | } |
| 515 | ret = DtsStartCapture(priv->dev); |
| 516 | if (ret != BC_STS_SUCCESS) { |
| 517 | av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n"); |
| 518 | goto fail; |
| 519 | } |
| 520 | |
| 521 | if (avctx->codec->id == AV_CODEC_ID_H264) { |
        priv->parser = av_parser_init(avctx->codec->id);
        if (priv->parser) {
            priv->parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
        } else {
            av_log(avctx, AV_LOG_WARNING,
                   "Cannot open the h.264 parser! Interlaced h.264 content "
                   "will not be detected reliably.\n");
        }
| 528 | } |
| 529 | av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n"); |
| 530 | |
| 531 | return 0; |
| 532 | |
| 533 | fail: |
| 534 | uninit(avctx); |
| 535 | return -1; |
| 536 | } |
| 537 | |
| 538 | |
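/*
 * Copy one decoded picture out of the hardware buffer into priv->pic,
 * accounting for the BCM70012's padded strides and reassembling interlaced
 * field pairs across calls. *got_frame is only set once a complete frame
 * (both fields, where applicable) is available.
 */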
| 539 | static inline CopyRet copy_frame(AVCodecContext *avctx, |
| 540 | BC_DTS_PROC_OUT *output, |
| 541 | void *data, int *got_frame) |
| 542 | { |
| 543 | BC_STATUS ret; |
| 544 | BC_DTS_STATUS decoder_status = { 0, }; |
| 545 | uint8_t trust_interlaced; |
| 546 | uint8_t interlaced; |
| 547 | |
| 548 | CHDContext *priv = avctx->priv_data; |
| 549 | int64_t pkt_pts = AV_NOPTS_VALUE; |
| 550 | uint8_t pic_type = 0; |
| 551 | |
| 552 | uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) == |
| 553 | VDEC_FLAG_BOTTOMFIELD; |
| 554 | uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST); |
| 555 | |
| 556 | int width = output->PicInfo.width; |
| 557 | int height = output->PicInfo.height; |
| 558 | int bwidth; |
| 559 | uint8_t *src = output->Ybuff; |
| 560 | int sStride; |
| 561 | uint8_t *dst; |
| 562 | int dStride; |
| 563 | |
| 564 | if (output->PicInfo.timeStamp != 0) { |
| 565 | OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp); |
| 566 | if (node) { |
| 567 | pkt_pts = node->reordered_opaque; |
| 568 | pic_type = node->pic_type; |
| 569 | av_free(node); |
| 570 | } else { |
| 571 | /* |
| 572 | * We will encounter a situation where a timestamp cannot be |
| 573 | * popped if a second field is being returned. In this case, |
| 574 | * each field has the same timestamp and the first one will |
| 575 | * cause it to be popped. To keep subsequent calculations |
             * simple, pic_type should be set to a FIELD value - it doesn't
| 577 | * matter which, but I chose BOTTOM. |
| 578 | */ |
| 579 | pic_type = PICT_BOTTOM_FIELD; |
| 580 | } |
| 581 | av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n", |
| 582 | output->PicInfo.timeStamp); |
| 583 | av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n", |
| 584 | pic_type); |
| 585 | } |
| 586 | |
| 587 | ret = DtsGetDriverStatus(priv->dev, &decoder_status); |
| 588 | if (ret != BC_STS_SUCCESS) { |
| 589 | av_log(avctx, AV_LOG_ERROR, |
| 590 | "CrystalHD: GetDriverStatus failed: %u\n", ret); |
| 591 | return RET_ERROR; |
| 592 | } |
| 593 | |
| 594 | /* |
| 595 | * For most content, we can trust the interlaced flag returned |
     * by the hardware, but sometimes we can't. We trust the flag if
     * any one of the following conditions holds:
| 598 | * |
| 599 | * 1) It's not h.264 content |
| 600 | * 2) The UNKNOWN_SRC flag is not set |
| 601 | * 3) We know we're expecting a second field |
     * 4) The hardware reports that this picture and the next picture
     *    have the same picture number.
| 604 | * |
| 605 | * Note that there can still be interlaced content that will |
| 606 | * fail this check, if the hardware hasn't decoded the next |
| 607 | * picture or if there is a corruption in the stream. (In either |
| 608 | * case a 0 will be returned for the next picture number) |
| 609 | */ |
| 610 | trust_interlaced = avctx->codec->id != AV_CODEC_ID_H264 || |
| 611 | !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) || |
| 612 | priv->need_second_field || |
| 613 | (decoder_status.picNumFlags & ~0x40000000) == |
| 614 | output->PicInfo.picture_number; |
| 615 | |
| 616 | /* |
| 617 | * If we got a false negative for trust_interlaced on the first field, |
| 618 | * we will realise our mistake here when we see that the picture number is that |
| 619 | * of the previous picture. We cannot recover the frame and should discard the |
| 620 | * second field to keep the correct number of output frames. |
| 621 | */ |
| 622 | if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) { |
| 623 | av_log(avctx, AV_LOG_WARNING, |
| 624 | "Incorrectly guessed progressive frame. Discarding second field\n"); |
| 625 | /* Returning without providing a picture. */ |
| 626 | return RET_OK; |
| 627 | } |
| 628 | |
| 629 | interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) && |
| 630 | trust_interlaced; |
| 631 | |
| 632 | if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) { |
| 633 | av_log(avctx, AV_LOG_VERBOSE, |
| 634 | "Next picture number unknown. Assuming progressive frame.\n"); |
| 635 | } |
| 636 | |
| 637 | av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n", |
| 638 | interlaced, trust_interlaced); |
| 639 | |
| 640 | if (priv->pic->data[0] && !priv->need_second_field) |
| 641 | av_frame_unref(priv->pic); |
| 642 | |
| 643 | priv->need_second_field = interlaced && !priv->need_second_field; |
| 644 | |
| 645 | if (!priv->pic->data[0]) { |
| 646 | if (ff_get_buffer(avctx, priv->pic, AV_GET_BUFFER_FLAG_REF) < 0) |
| 647 | return RET_ERROR; |
| 648 | } |
| 649 | |
| 650 | bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0); |
| 651 | if (priv->is_70012) { |
| 652 | int pStride; |
| 653 | |
| 654 | if (width <= 720) |
| 655 | pStride = 720; |
| 656 | else if (width <= 1280) |
| 657 | pStride = 1280; |
| 658 | else pStride = 1920; |
| 659 | sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0); |
| 660 | } else { |
| 661 | sStride = bwidth; |
| 662 | } |
| 663 | |
| 664 | dStride = priv->pic->linesize[0]; |
| 665 | dst = priv->pic->data[0]; |
| 666 | |
| 667 | av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n"); |
| 668 | |
| 669 | if (interlaced) { |
| 670 | int dY = 0; |
| 671 | int sY = 0; |
| 672 | |
| 673 | height /= 2; |
| 674 | if (bottom_field) { |
| 675 | av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n"); |
| 676 | dY = 1; |
| 677 | } else { |
| 678 | av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n"); |
| 679 | dY = 0; |
| 680 | } |
| 681 | |
        for (sY = 0; sY < height; dY += 2, sY++)
            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
| 686 | } else { |
| 687 | av_image_copy_plane(dst, dStride, src, sStride, bwidth, height); |
| 688 | } |
| 689 | |
| 690 | priv->pic->interlaced_frame = interlaced; |
| 691 | if (interlaced) |
| 692 | priv->pic->top_field_first = !bottom_first; |
| 693 | |
| 694 | priv->pic->pkt_pts = pkt_pts; |
| 695 | |
| 696 | if (!priv->need_second_field) { |
| 697 | *got_frame = 1; |
| 698 | if ((ret = av_frame_ref(data, priv->pic)) < 0) { |
| 699 | return ret; |
| 700 | } |
| 701 | } |
| 702 | |
| 703 | /* |
| 704 | * Two types of PAFF content have been observed. One form causes the |
| 705 | * hardware to return a field pair and the other individual fields, |
| 706 | * even though the input is always individual fields. We must skip |
| 707 | * copying on the next decode() call to maintain pipeline length in |
| 708 | * the first case. |
| 709 | */ |
| 710 | if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) && |
| 711 | (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) { |
| 712 | av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n"); |
| 713 | return RET_SKIP_NEXT_COPY; |
| 714 | } |
| 715 | |
| 716 | /* |
| 717 | * The logic here is purely based on empirical testing with samples. |
| 718 | * If we need a second field, it could come from a second input packet, |
| 719 | * or it could come from the same field-pair input packet at the current |
| 720 | * field. In the first case, we should return and wait for the next time |
| 721 | * round to get the second field, while in the second case, we should |
| 722 | * ask the decoder for it immediately. |
| 723 | * |
| 724 | * Testing has shown that we are dealing with the fieldpair -> two fields |
| 725 | * case if the VDEC_FLAG_UNKNOWN_SRC is not set or if the input picture |
| 726 | * type was PICT_FRAME (in this second case, the flag might still be set) |
| 727 | */ |
| 728 | return priv->need_second_field && |
| 729 | (!(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) || |
| 730 | pic_type == PICT_FRAME) ? |
| 731 | RET_COPY_NEXT_FIELD : RET_OK; |
| 732 | } |
| 733 | |
| 734 | |
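/*
 * Ask the hardware for one decoded picture. A format-change event updates
 * the reported dimensions and sample aspect ratio and is retried; a valid
 * picture is handed to copy_frame(); BC_STS_BUSY means the picture is not
 * ready yet and the call should be retried.
 */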
| 735 | static inline CopyRet receive_frame(AVCodecContext *avctx, |
| 736 | void *data, int *got_frame) |
| 737 | { |
| 738 | BC_STATUS ret; |
| 739 | BC_DTS_PROC_OUT output = { |
| 740 | .PicInfo.width = avctx->width, |
| 741 | .PicInfo.height = avctx->height, |
| 742 | }; |
| 743 | CHDContext *priv = avctx->priv_data; |
| 744 | HANDLE dev = priv->dev; |
| 745 | |
| 746 | *got_frame = 0; |
| 747 | |
| 748 | // Request decoded data from the driver |
| 749 | ret = DtsProcOutputNoCopy(dev, OUTPUT_PROC_TIMEOUT, &output); |
| 750 | if (ret == BC_STS_FMT_CHANGE) { |
| 751 | av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Initial format change\n"); |
| 752 | avctx->width = output.PicInfo.width; |
| 753 | avctx->height = output.PicInfo.height; |
| 754 | switch ( output.PicInfo.aspect_ratio ) { |
| 755 | case vdecAspectRatioSquare: |
| 756 | avctx->sample_aspect_ratio = (AVRational) { 1, 1}; |
| 757 | break; |
| 758 | case vdecAspectRatio12_11: |
| 759 | avctx->sample_aspect_ratio = (AVRational) { 12, 11}; |
| 760 | break; |
| 761 | case vdecAspectRatio10_11: |
| 762 | avctx->sample_aspect_ratio = (AVRational) { 10, 11}; |
| 763 | break; |
| 764 | case vdecAspectRatio16_11: |
| 765 | avctx->sample_aspect_ratio = (AVRational) { 16, 11}; |
| 766 | break; |
| 767 | case vdecAspectRatio40_33: |
| 768 | avctx->sample_aspect_ratio = (AVRational) { 40, 33}; |
| 769 | break; |
| 770 | case vdecAspectRatio24_11: |
| 771 | avctx->sample_aspect_ratio = (AVRational) { 24, 11}; |
| 772 | break; |
| 773 | case vdecAspectRatio20_11: |
| 774 | avctx->sample_aspect_ratio = (AVRational) { 20, 11}; |
| 775 | break; |
| 776 | case vdecAspectRatio32_11: |
| 777 | avctx->sample_aspect_ratio = (AVRational) { 32, 11}; |
| 778 | break; |
| 779 | case vdecAspectRatio80_33: |
| 780 | avctx->sample_aspect_ratio = (AVRational) { 80, 33}; |
| 781 | break; |
| 782 | case vdecAspectRatio18_11: |
| 783 | avctx->sample_aspect_ratio = (AVRational) { 18, 11}; |
| 784 | break; |
| 785 | case vdecAspectRatio15_11: |
| 786 | avctx->sample_aspect_ratio = (AVRational) { 15, 11}; |
| 787 | break; |
| 788 | case vdecAspectRatio64_33: |
| 789 | avctx->sample_aspect_ratio = (AVRational) { 64, 33}; |
| 790 | break; |
| 791 | case vdecAspectRatio160_99: |
| 792 | avctx->sample_aspect_ratio = (AVRational) {160, 99}; |
| 793 | break; |
| 794 | case vdecAspectRatio4_3: |
| 795 | avctx->sample_aspect_ratio = (AVRational) { 4, 3}; |
| 796 | break; |
| 797 | case vdecAspectRatio16_9: |
| 798 | avctx->sample_aspect_ratio = (AVRational) { 16, 9}; |
| 799 | break; |
| 800 | case vdecAspectRatio221_1: |
| 801 | avctx->sample_aspect_ratio = (AVRational) {221, 1}; |
| 802 | break; |
| 803 | } |
| 804 | return RET_COPY_AGAIN; |
| 805 | } else if (ret == BC_STS_SUCCESS) { |
| 806 | int copy_ret = -1; |
| 807 | if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID) { |
| 808 | if (priv->last_picture == -1) { |
| 809 | /* |
| 810 | * Init to one less, so that the incrementing code doesn't |
| 811 | * need to be special-cased. |
| 812 | */ |
| 813 | priv->last_picture = output.PicInfo.picture_number - 1; |
| 814 | } |
| 815 | |
| 816 | if (avctx->codec->id == AV_CODEC_ID_MPEG4 && |
| 817 | output.PicInfo.timeStamp == 0 && priv->bframe_bug) { |
| 818 | av_log(avctx, AV_LOG_VERBOSE, |
| 819 | "CrystalHD: Not returning packed frame twice.\n"); |
| 820 | priv->last_picture++; |
| 821 | DtsReleaseOutputBuffs(dev, NULL, FALSE); |
| 822 | return RET_COPY_AGAIN; |
| 823 | } |
| 824 | |
| 825 | print_frame_info(priv, &output); |
| 826 | |
| 827 | if (priv->last_picture + 1 < output.PicInfo.picture_number) { |
| 828 | av_log(avctx, AV_LOG_WARNING, |
| 829 | "CrystalHD: Picture Number discontinuity\n"); |
| 830 | /* |
| 831 | * Have we lost frames? If so, we need to shrink the |
| 832 | * pipeline length appropriately. |
| 833 | * |
| 834 | * XXX: I have no idea what the semantics of this situation |
| 835 | * are so I don't even know if we've lost frames or which |
| 836 | * ones. |
| 837 | * |
| 838 | * In any case, only warn the first time. |
| 839 | */ |
| 840 | priv->last_picture = output.PicInfo.picture_number - 1; |
| 841 | } |
| 842 | |
| 843 | copy_ret = copy_frame(avctx, &output, data, got_frame); |
| 844 | if (*got_frame > 0) { |
| 845 | avctx->has_b_frames--; |
| 846 | priv->last_picture++; |
| 847 | av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Pipeline length: %u\n", |
| 848 | avctx->has_b_frames); |
| 849 | } |
| 850 | } else { |
| 851 | /* |
| 852 | * An invalid frame has been consumed. |
| 853 | */ |
| 854 | av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput succeeded with " |
| 855 | "invalid PIB\n"); |
| 856 | avctx->has_b_frames--; |
| 857 | copy_ret = RET_OK; |
| 858 | } |
| 859 | DtsReleaseOutputBuffs(dev, NULL, FALSE); |
| 860 | |
| 861 | return copy_ret; |
| 862 | } else if (ret == BC_STS_BUSY) { |
| 863 | return RET_COPY_AGAIN; |
| 864 | } else { |
| 865 | av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput failed %d\n", ret); |
| 866 | return RET_ERROR; |
| 867 | } |
| 868 | } |
| 869 | |
| 870 | |
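/*
 * Main decode entry point: optionally bitstream-filter and parse the packet
 * (to spot interlaced h.264), feed it to the hardware under a fake
 * timestamp, and then - once the pipeline has been primed - try to copy a
 * picture back out.
 */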
| 871 | static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) |
| 872 | { |
| 873 | BC_STATUS ret; |
| 874 | BC_DTS_STATUS decoder_status = { 0, }; |
| 875 | CopyRet rec_ret; |
| 876 | CHDContext *priv = avctx->priv_data; |
| 877 | HANDLE dev = priv->dev; |
| 878 | uint8_t *in_data = avpkt->data; |
| 879 | int len = avpkt->size; |
| 880 | int free_data = 0; |
| 881 | uint8_t pic_type = 0; |
| 882 | |
| 883 | av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n"); |
| 884 | |
| 885 | if (avpkt->size == 7 && !priv->bframe_bug) { |
| 886 | /* |
| 887 | * The use of a drop frame triggers the bug |
| 888 | */ |
| 889 | av_log(avctx, AV_LOG_INFO, |
| 890 | "CrystalHD: Enabling work-around for packed b-frame bug\n"); |
| 891 | priv->bframe_bug = 1; |
| 892 | } else if (avpkt->size == 8 && priv->bframe_bug) { |
| 893 | /* |
| 894 | * Delay frames don't trigger the bug |
| 895 | */ |
| 896 | av_log(avctx, AV_LOG_INFO, |
| 897 | "CrystalHD: Disabling work-around for packed b-frame bug\n"); |
| 898 | priv->bframe_bug = 0; |
| 899 | } |
| 900 | |
| 901 | if (len) { |
| 902 | int32_t tx_free = (int32_t)DtsTxFreeSize(dev); |
| 903 | |
| 904 | if (priv->parser) { |
| 905 | int ret = 0; |
| 906 | |
| 907 | if (priv->bsfc) { |
| 908 | ret = av_bitstream_filter_filter(priv->bsfc, avctx, NULL, |
| 909 | &in_data, &len, |
| 910 | avpkt->data, len, 0); |
| 911 | } |
| 912 | free_data = ret > 0; |
| 913 | |
| 914 | if (ret >= 0) { |
| 915 | uint8_t *pout; |
| 916 | int psize; |
| 917 | int index; |
| 918 | H264Context *h = priv->parser->priv_data; |
| 919 | |
| 920 | index = av_parser_parse2(priv->parser, avctx, &pout, &psize, |
| 921 | in_data, len, avctx->internal->pkt->pts, |
| 922 | avctx->internal->pkt->dts, 0); |
| 923 | if (index < 0) { |
| 924 | av_log(avctx, AV_LOG_WARNING, |
| 925 | "CrystalHD: Failed to parse h.264 packet to " |
| 926 | "detect interlacing.\n"); |
| 927 | } else if (index != len) { |
| 928 | av_log(avctx, AV_LOG_WARNING, |
| 929 | "CrystalHD: Failed to parse h.264 packet " |
| 930 | "completely. Interlaced frames may be " |
| 931 | "incorrectly detected.\n"); |
| 932 | } else { |
| 933 | av_log(avctx, AV_LOG_VERBOSE, |
| 934 | "CrystalHD: parser picture type %d\n", |
| 935 | h->picture_structure); |
| 936 | pic_type = h->picture_structure; |
| 937 | } |
| 938 | } else { |
| 939 | av_log(avctx, AV_LOG_WARNING, |
| 940 | "CrystalHD: mp4toannexb filter failed to filter " |
| 941 | "packet. Interlaced frames may be incorrectly " |
| 942 | "detected.\n"); |
| 943 | } |
| 944 | } |
| 945 | |
| 946 | if (len < tx_free - 1024) { |
| 947 | /* |
| 948 | * Despite being notionally opaque, either libcrystalhd or |
| 949 | * the hardware itself will mangle pts values that are too |
| 950 | * small or too large. The docs claim it should be in units |
| 951 | * of 100ns. Given that we're nominally dealing with a black |
| 952 | * box on both sides, any transform we do has no guarantee of |
| 953 | * avoiding mangling so we need to build a mapping to values |
| 954 | * we know will not be mangled. |
| 955 | */ |
| 956 | uint64_t pts = opaque_list_push(priv, avctx->internal->pkt->pts, pic_type); |
| 957 | if (!pts) { |
| 958 | if (free_data) { |
| 959 | av_freep(&in_data); |
| 960 | } |
| 961 | return AVERROR(ENOMEM); |
| 962 | } |
| 963 | av_log(priv->avctx, AV_LOG_VERBOSE, |
| 964 | "input \"pts\": %"PRIu64"\n", pts); |
| 965 | ret = DtsProcInput(dev, in_data, len, pts, 0); |
| 966 | if (free_data) { |
| 967 | av_freep(&in_data); |
| 968 | } |
| 969 | if (ret == BC_STS_BUSY) { |
| 970 | av_log(avctx, AV_LOG_WARNING, |
| 971 | "CrystalHD: ProcInput returned busy\n"); |
| 972 | usleep(BASE_WAIT); |
| 973 | return AVERROR(EBUSY); |
| 974 | } else if (ret != BC_STS_SUCCESS) { |
| 975 | av_log(avctx, AV_LOG_ERROR, |
| 976 | "CrystalHD: ProcInput failed: %u\n", ret); |
| 977 | return -1; |
| 978 | } |
| 979 | avctx->has_b_frames++; |
| 980 | } else { |
| 981 | av_log(avctx, AV_LOG_WARNING, "CrystalHD: Input buffer full\n"); |
| 982 | len = 0; // We didn't consume any bytes. |
| 983 | } |
| 984 | } else { |
| 985 | av_log(avctx, AV_LOG_INFO, "CrystalHD: No more input data\n"); |
| 986 | } |
| 987 | |
| 988 | if (priv->skip_next_output) { |
| 989 | av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Skipping next output.\n"); |
| 990 | priv->skip_next_output = 0; |
| 991 | avctx->has_b_frames--; |
| 992 | return len; |
| 993 | } |
| 994 | |
| 995 | ret = DtsGetDriverStatus(dev, &decoder_status); |
| 996 | if (ret != BC_STS_SUCCESS) { |
| 997 | av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n"); |
| 998 | return -1; |
| 999 | } |
| 1000 | |
| 1001 | /* |
| 1002 | * No frames ready. Don't try to extract. |
| 1003 | * |
| 1004 | * Empirical testing shows that ReadyListCount can be a damn lie, |
| 1005 | * and ProcOut still fails when count > 0. The same testing showed |
| 1006 | * that two more iterations were needed before ProcOutput would |
| 1007 | * succeed. |
| 1008 | */ |
| 1009 | if (priv->output_ready < 2) { |
| 1010 | if (decoder_status.ReadyListCount != 0) |
| 1011 | priv->output_ready++; |
| 1012 | usleep(BASE_WAIT); |
| 1013 | av_log(avctx, AV_LOG_INFO, "CrystalHD: Filling pipeline.\n"); |
| 1014 | return len; |
| 1015 | } else if (decoder_status.ReadyListCount == 0) { |
| 1016 | /* |
| 1017 | * After the pipeline is established, if we encounter a lack of frames |
| 1018 | * that probably means we're not giving the hardware enough time to |
| 1019 | * decode them, so start increasing the wait time at the end of a |
| 1020 | * decode call. |
| 1021 | */ |
| 1022 | usleep(BASE_WAIT); |
| 1023 | priv->decode_wait += WAIT_UNIT; |
| 1024 | av_log(avctx, AV_LOG_INFO, "CrystalHD: No frames ready. Returning\n"); |
| 1025 | return len; |
| 1026 | } |
| 1027 | |
| 1028 | do { |
| 1029 | rec_ret = receive_frame(avctx, data, got_frame); |
| 1030 | if (rec_ret == RET_OK && *got_frame == 0) { |
| 1031 | /* |
| 1032 | * This case is for when the encoded fields are stored |
| 1033 | * separately and we get a separate avpkt for each one. To keep |
| 1034 | * the pipeline stable, we should return nothing and wait for |
| 1035 | * the next time round to grab the second field. |
| 1036 | * H.264 PAFF is an example of this. |
| 1037 | */ |
| 1038 | av_log(avctx, AV_LOG_VERBOSE, "Returning after first field.\n"); |
| 1039 | avctx->has_b_frames--; |
| 1040 | } else if (rec_ret == RET_COPY_NEXT_FIELD) { |
| 1041 | /* |
| 1042 | * This case is for when the encoded fields are stored in a |
             * single avpkt but the hardware returns them separately. Unless
| 1044 | * we grab the second field before returning, we'll slip another |
| 1045 | * frame in the pipeline and if that happens a lot, we're sunk. |
| 1046 | * So we have to get that second field now. |
| 1047 | * Interlaced mpeg2 and vc1 are examples of this. |
| 1048 | */ |
| 1049 | av_log(avctx, AV_LOG_VERBOSE, "Trying to get second field.\n"); |
| 1050 | while (1) { |
| 1051 | usleep(priv->decode_wait); |
| 1052 | ret = DtsGetDriverStatus(dev, &decoder_status); |
| 1053 | if (ret == BC_STS_SUCCESS && |
| 1054 | decoder_status.ReadyListCount > 0) { |
| 1055 | rec_ret = receive_frame(avctx, data, got_frame); |
| 1056 | if ((rec_ret == RET_OK && *got_frame > 0) || |
| 1057 | rec_ret == RET_ERROR) |
| 1058 | break; |
| 1059 | } |
| 1060 | } |
| 1061 | av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Got second field.\n"); |
| 1062 | } else if (rec_ret == RET_SKIP_NEXT_COPY) { |
| 1063 | /* |
| 1064 | * Two input packets got turned into a field pair. Gawd. |
| 1065 | */ |
| 1066 | av_log(avctx, AV_LOG_VERBOSE, |
| 1067 | "Don't output on next decode call.\n"); |
| 1068 | priv->skip_next_output = 1; |
| 1069 | } |
| 1070 | /* |
| 1071 | * If rec_ret == RET_COPY_AGAIN, that means that either we just handled |
| 1072 | * a FMT_CHANGE event and need to go around again for the actual frame, |
| 1073 | * we got a busy status and need to try again, or we're dealing with |
| 1074 | * packed b-frames, where the hardware strangely returns the packed |
| 1075 | * p-frame twice. We choose to keep the second copy as it carries the |
| 1076 | * valid pts. |
| 1077 | */ |
| 1078 | } while (rec_ret == RET_COPY_AGAIN); |
| 1079 | usleep(priv->decode_wait); |
| 1080 | return len; |
| 1081 | } |
| 1082 | |
| 1083 | |
| 1084 | #if CONFIG_H264_CRYSTALHD_DECODER |
| 1085 | static AVClass h264_class = { |
| 1086 | "h264_crystalhd", |
| 1087 | av_default_item_name, |
| 1088 | options, |
| 1089 | LIBAVUTIL_VERSION_INT, |
| 1090 | }; |
| 1091 | |
| 1092 | AVCodec ff_h264_crystalhd_decoder = { |
| 1093 | .name = "h264_crystalhd", |
| 1094 | .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (CrystalHD acceleration)"), |
| 1095 | .type = AVMEDIA_TYPE_VIDEO, |
| 1096 | .id = AV_CODEC_ID_H264, |
| 1097 | .priv_data_size = sizeof(CHDContext), |
| 1098 | .init = init, |
| 1099 | .close = uninit, |
| 1100 | .decode = decode, |
| 1101 | .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, |
| 1102 | .flush = flush, |
| 1103 | .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE}, |
| 1104 | .priv_class = &h264_class, |
| 1105 | }; |
| 1106 | #endif |
| 1107 | |
| 1108 | #if CONFIG_MPEG2_CRYSTALHD_DECODER |
| 1109 | static AVClass mpeg2_class = { |
| 1110 | "mpeg2_crystalhd", |
| 1111 | av_default_item_name, |
| 1112 | options, |
| 1113 | LIBAVUTIL_VERSION_INT, |
| 1114 | }; |
| 1115 | |
| 1116 | AVCodec ff_mpeg2_crystalhd_decoder = { |
| 1117 | .name = "mpeg2_crystalhd", |
| 1118 | .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 Video (CrystalHD acceleration)"), |
| 1119 | .type = AVMEDIA_TYPE_VIDEO, |
| 1120 | .id = AV_CODEC_ID_MPEG2VIDEO, |
| 1121 | .priv_data_size = sizeof(CHDContext), |
| 1122 | .init = init, |
| 1123 | .close = uninit, |
| 1124 | .decode = decode, |
| 1125 | .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, |
| 1126 | .flush = flush, |
| 1127 | .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE}, |
| 1128 | .priv_class = &mpeg2_class, |
| 1129 | }; |
| 1130 | #endif |
| 1131 | |
| 1132 | #if CONFIG_MPEG4_CRYSTALHD_DECODER |
| 1133 | static AVClass mpeg4_class = { |
| 1134 | "mpeg4_crystalhd", |
| 1135 | av_default_item_name, |
| 1136 | options, |
| 1137 | LIBAVUTIL_VERSION_INT, |
| 1138 | }; |
| 1139 | |
| 1140 | AVCodec ff_mpeg4_crystalhd_decoder = { |
| 1141 | .name = "mpeg4_crystalhd", |
| 1142 | .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 (CrystalHD acceleration)"), |
| 1143 | .type = AVMEDIA_TYPE_VIDEO, |
| 1144 | .id = AV_CODEC_ID_MPEG4, |
| 1145 | .priv_data_size = sizeof(CHDContext), |
| 1146 | .init = init, |
| 1147 | .close = uninit, |
| 1148 | .decode = decode, |
| 1149 | .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, |
| 1150 | .flush = flush, |
| 1151 | .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE}, |
| 1152 | .priv_class = &mpeg4_class, |
| 1153 | }; |
| 1154 | #endif |
| 1155 | |
| 1156 | #if CONFIG_MSMPEG4_CRYSTALHD_DECODER |
| 1157 | static AVClass msmpeg4_class = { |
| 1158 | "msmpeg4_crystalhd", |
| 1159 | av_default_item_name, |
| 1160 | options, |
| 1161 | LIBAVUTIL_VERSION_INT, |
| 1162 | }; |
| 1163 | |
| 1164 | AVCodec ff_msmpeg4_crystalhd_decoder = { |
| 1165 | .name = "msmpeg4_crystalhd", |
| 1166 | .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 Microsoft variant version 3 (CrystalHD acceleration)"), |
| 1167 | .type = AVMEDIA_TYPE_VIDEO, |
| 1168 | .id = AV_CODEC_ID_MSMPEG4V3, |
| 1169 | .priv_data_size = sizeof(CHDContext), |
| 1170 | .init = init, |
| 1171 | .close = uninit, |
| 1172 | .decode = decode, |
| 1173 | .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL, |
| 1174 | .flush = flush, |
| 1175 | .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE}, |
| 1176 | .priv_class = &msmpeg4_class, |
| 1177 | }; |
| 1178 | #endif |
| 1179 | |
| 1180 | #if CONFIG_VC1_CRYSTALHD_DECODER |
| 1181 | static AVClass vc1_class = { |
| 1182 | "vc1_crystalhd", |
| 1183 | av_default_item_name, |
| 1184 | options, |
| 1185 | LIBAVUTIL_VERSION_INT, |
| 1186 | }; |
| 1187 | |
| 1188 | AVCodec ff_vc1_crystalhd_decoder = { |
| 1189 | .name = "vc1_crystalhd", |
| 1190 | .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 (CrystalHD acceleration)"), |
| 1191 | .type = AVMEDIA_TYPE_VIDEO, |
| 1192 | .id = AV_CODEC_ID_VC1, |
| 1193 | .priv_data_size = sizeof(CHDContext), |
| 1194 | .init = init, |
| 1195 | .close = uninit, |
| 1196 | .decode = decode, |
| 1197 | .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, |
| 1198 | .flush = flush, |
| 1199 | .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE}, |
| 1200 | .priv_class = &vc1_class, |
| 1201 | }; |
| 1202 | #endif |
| 1203 | |
| 1204 | #if CONFIG_WMV3_CRYSTALHD_DECODER |
| 1205 | static AVClass wmv3_class = { |
| 1206 | "wmv3_crystalhd", |
| 1207 | av_default_item_name, |
| 1208 | options, |
| 1209 | LIBAVUTIL_VERSION_INT, |
| 1210 | }; |
| 1211 | |
| 1212 | AVCodec ff_wmv3_crystalhd_decoder = { |
| 1213 | .name = "wmv3_crystalhd", |
| 1214 | .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 (CrystalHD acceleration)"), |
| 1215 | .type = AVMEDIA_TYPE_VIDEO, |
| 1216 | .id = AV_CODEC_ID_WMV3, |
| 1217 | .priv_data_size = sizeof(CHDContext), |
| 1218 | .init = init, |
| 1219 | .close = uninit, |
| 1220 | .decode = decode, |
| 1221 | .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, |
| 1222 | .flush = flush, |
| 1223 | .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE}, |
| 1224 | .priv_class = &wmv3_class, |
| 1225 | }; |
| 1226 | #endif |