/*
 * AVFoundation input device
 * Copyright (c) 2014 Thilo Borgmann <thilo.borgmann@mail.de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * AVFoundation input device
 * @author Thilo Borgmann <thilo.borgmann@mail.de>
 */
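
/*
 * Typical usage (a sketch, assuming the standard ffmpeg CLI with libavdevice
 * enabled; the output file names are only examples):
 *
 *     ffmpeg -f avfoundation -list_devices true -i ""
 *     ffmpeg -f avfoundation -i "0" out.mov
 *     ffmpeg -f avfoundation -pixel_format uyvy422 -i "default" out.mov
 *
 * The input string is either a device index, a prefix of the device's
 * localized name, or "default". Programmatic use follows the usual
 * libavformat pattern; a minimal sketch, assuming avdevice_register_all()
 * has already been called:
 *
 *     AVInputFormat *fmt = av_find_input_format("avfoundation");
 *     AVFormatContext *ic = NULL;
 *     if (avformat_open_input(&ic, "0", fmt, NULL) < 0)
 *         ; // handle the error
 */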

#import <AVFoundation/AVFoundation.h>
#include <pthread.h>

#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "libavformat/internal.h"
#include "libavutil/internal.h"
#include "libavutil/time.h"
#include "avdevice.h"

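/* Internal time base for packet timestamps: frames are stamped with the
 * wall-clock time elapsed since avf_read_header(), rescaled from
 * AV_TIME_BASE_Q (microseconds) into this 1/100 second base. */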
static const int avf_time_base = 100;

static const AVRational avf_time_base_q = {
    .num = 1,
    .den = avf_time_base
};

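/* Mapping between FFmpeg pixel formats and the CoreVideo pixel format
 * constants (OSType FourCC codes) that AVFoundation understands. The table
 * is scanned linearly and terminated by the AV_PIX_FMT_NONE sentinel. */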
struct AVFPixelFormatSpec {
    enum AVPixelFormat ff_id;
    OSType avf_id;
};

static const struct AVFPixelFormatSpec avf_pixel_formats[] = {
    { AV_PIX_FMT_MONOBLACK,    kCVPixelFormatType_1Monochrome },
    { AV_PIX_FMT_RGB555BE,     kCVPixelFormatType_16BE555 },
    { AV_PIX_FMT_RGB555LE,     kCVPixelFormatType_16LE555 },
    { AV_PIX_FMT_RGB565BE,     kCVPixelFormatType_16BE565 },
    { AV_PIX_FMT_RGB565LE,     kCVPixelFormatType_16LE565 },
    { AV_PIX_FMT_RGB24,        kCVPixelFormatType_24RGB },
    { AV_PIX_FMT_BGR24,        kCVPixelFormatType_24BGR },
    { AV_PIX_FMT_0RGB,         kCVPixelFormatType_32ARGB },
    { AV_PIX_FMT_BGR0,         kCVPixelFormatType_32BGRA },
    { AV_PIX_FMT_0BGR,         kCVPixelFormatType_32ABGR },
    { AV_PIX_FMT_RGB0,         kCVPixelFormatType_32RGBA },
    { AV_PIX_FMT_BGR48BE,      kCVPixelFormatType_48RGB },
    { AV_PIX_FMT_UYVY422,      kCVPixelFormatType_422YpCbCr8 },
    { AV_PIX_FMT_YUVA444P,     kCVPixelFormatType_4444YpCbCrA8R },
    { AV_PIX_FMT_YUVA444P16LE, kCVPixelFormatType_4444AYpCbCr16 },
    { AV_PIX_FMT_YUV444P,      kCVPixelFormatType_444YpCbCr8 },
    { AV_PIX_FMT_YUV422P16,    kCVPixelFormatType_422YpCbCr16 },
    { AV_PIX_FMT_YUV422P10,    kCVPixelFormatType_422YpCbCr10 },
    { AV_PIX_FMT_YUV444P10,    kCVPixelFormatType_444YpCbCr10 },
    { AV_PIX_FMT_YUV420P,      kCVPixelFormatType_420YpCbCr8Planar },
    { AV_PIX_FMT_NV12,         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange },
    { AV_PIX_FMT_YUYV422,      kCVPixelFormatType_422YpCbCr8_yuvs },
#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
    { AV_PIX_FMT_GRAY8,        kCVPixelFormatType_OneComponent8 },
#endif
    { AV_PIX_FMT_NONE, 0 }
};

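/* Private demuxer context. One instance lives in AVFormatContext.priv_data;
 * the AVOption table below fills the user-settable fields. current_frame is
 * the hand-off slot between the capture delegate and avf_read_packet(). */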
typedef struct
{
    AVClass*           class;

    float              frame_rate;
    int                frames_captured;
    int64_t            first_pts;
    pthread_mutex_t    frame_lock;
    pthread_cond_t     frame_wait_cond;
    id                 avf_delegate;

    int                list_devices;
    int                video_device_index;
    enum AVPixelFormat pixel_format;

    AVCaptureSession         *capture_session;
    AVCaptureVideoDataOutput *video_output;
    CMSampleBufferRef         current_frame;
} AVFContext;

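/* frame_lock/frame_wait_cond guard current_frame, which is written by the
 * AVFoundation delegate on its dispatch queue and consumed by
 * avf_read_packet() on the demuxer thread. */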
static void lock_frames(AVFContext* ctx)
{
    pthread_mutex_lock(&ctx->frame_lock);
}

static void unlock_frames(AVFContext* ctx)
{
    pthread_mutex_unlock(&ctx->frame_lock);
}

/** FrameReceiver class - delegate for AVCaptureSession
 */
@interface AVFFrameReceiver : NSObject
{
    AVFContext* _context;
}

- (id)initWithContext:(AVFContext*)context;

- (void)  captureOutput:(AVCaptureOutput *)captureOutput
  didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
         fromConnection:(AVCaptureConnection *)connection;

@end

@implementation AVFFrameReceiver

- (id)initWithContext:(AVFContext*)context
{
    if (self = [super init]) {
        _context = context;
    }
    return self;
}

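/* Called by AVFoundation on its dispatch queue for every captured frame.
 * Only the most recent sample buffer is kept: a previous, not yet consumed
 * frame is released and replaced, and the waiting reader is signalled. */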
- (void)  captureOutput:(AVCaptureOutput *)captureOutput
  didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
         fromConnection:(AVCaptureConnection *)connection
{
    lock_frames(_context);

    if (_context->current_frame != nil) {
        CFRelease(_context->current_frame);
    }

    _context->current_frame = (CMSampleBufferRef)CFRetain(videoFrame);

    pthread_cond_signal(&_context->frame_wait_cond);

    unlock_frames(_context);

    ++_context->frames_captured;
}

@end

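/* Stop the capture session and release every Objective-C/CoreFoundation
 * object owned by the context. Used both by the error path of
 * avf_read_header() and by avf_close(). */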
static void destroy_context(AVFContext* ctx)
{
    [ctx->capture_session stopRunning];

    [ctx->capture_session release];
    [ctx->video_output    release];
    [ctx->avf_delegate    release];

    ctx->capture_session = NULL;
    ctx->video_output    = NULL;
    ctx->avf_delegate    = NULL;

    pthread_mutex_destroy(&ctx->frame_lock);
    pthread_cond_destroy(&ctx->frame_wait_cond);

    if (ctx->current_frame) {
        CFRelease(ctx->current_frame);
    }
}

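/* read_header: resolve the capture device (by index, by name prefix, or the
 * system default), build an AVCaptureSession with a video data output in the
 * negotiated pixel format, start it, and derive the stream parameters from
 * the first captured frame. Returns AVERROR(EIO) on any failure. */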
static int avf_read_header(AVFormatContext *s)
{
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
    AVFContext *ctx = (AVFContext*)s->priv_data;
    ctx->first_pts  = av_gettime();

    pthread_mutex_init(&ctx->frame_lock, NULL);
    pthread_cond_init(&ctx->frame_wait_cond, NULL);

    // List devices if requested
    if (ctx->list_devices) {
        av_log(ctx, AV_LOG_INFO, "AVFoundation video devices:\n");
        NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
        for (AVCaptureDevice *device in devices) {
            const char *name = [[device localizedName] UTF8String];
            int index        = [devices indexOfObject:device];
            av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
        }
        goto fail;
    }

    // Find capture device
    AVCaptureDevice *video_device = nil;

    // check for device index given in filename
    if (ctx->video_device_index == -1) {
        sscanf(s->filename, "%d", &ctx->video_device_index);
    }

    if (ctx->video_device_index >= 0) {
        NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];

        if (ctx->video_device_index >= [devices count]) {
            av_log(ctx, AV_LOG_ERROR, "Invalid device index\n");
            goto fail;
        }

        video_device = [devices objectAtIndex:ctx->video_device_index];
    } else if (strncmp(s->filename, "",        1) &&
               strncmp(s->filename, "default", 7)) {
        NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];

        for (AVCaptureDevice *device in devices) {
            if (!strncmp(s->filename, [[device localizedName] UTF8String], strlen(s->filename))) {
                video_device = device;
                break;
            }
        }

        if (!video_device) {
            av_log(ctx, AV_LOG_ERROR, "Video device not found\n");
            goto fail;
        }
    } else {
        video_device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeMuxed];
    }

    // Video capture device not found, looking for AVMediaTypeVideo
    if (!video_device) {
        video_device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];

        if (!video_device) {
            av_log(s, AV_LOG_ERROR, "No AV capture device found\n");
            goto fail;
        }
    }

    NSString* dev_display_name = [video_device localizedName];
    av_log(s, AV_LOG_DEBUG, "'%s' opened\n", [dev_display_name UTF8String]);

    // Initialize capture session
    ctx->capture_session = [[AVCaptureSession alloc] init];

    NSError *error = nil;
    AVCaptureDeviceInput* capture_dev_input = [[[AVCaptureDeviceInput alloc] initWithDevice:video_device error:&error] autorelease];

    if (!capture_dev_input) {
        av_log(s, AV_LOG_ERROR, "Failed to create AV capture input device: %s\n",
               [[error localizedDescription] UTF8String]);
        goto fail;
    }

    if ([ctx->capture_session canAddInput:capture_dev_input]) {
        [ctx->capture_session addInput:capture_dev_input];
    } else {
        av_log(s, AV_LOG_ERROR, "can't add video input to capture session\n");
        goto fail;
    }

    // Attaching output
    ctx->video_output = [[AVCaptureVideoDataOutput alloc] init];

    if (!ctx->video_output) {
        av_log(s, AV_LOG_ERROR, "Failed to init AV video output\n");
        goto fail;
    }

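    /* Pixel format negotiation: map the requested AVPixelFormat to its
     * CoreVideo counterpart, verify that the device actually offers it, and
     * otherwise fall back to the first format supported by both sides. */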
    // select pixel format
    struct AVFPixelFormatSpec pxl_fmt_spec;
    pxl_fmt_spec.ff_id = AV_PIX_FMT_NONE;

    for (int i = 0; avf_pixel_formats[i].ff_id != AV_PIX_FMT_NONE; i++) {
        if (ctx->pixel_format == avf_pixel_formats[i].ff_id) {
            pxl_fmt_spec = avf_pixel_formats[i];
            break;
        }
    }

    // check if selected pixel format is supported by AVFoundation
    if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
        av_log(s, AV_LOG_ERROR, "Selected pixel format (%s) is not supported by AVFoundation.\n",
               av_get_pix_fmt_name(ctx->pixel_format));
        goto fail;
    }

    // check if the pixel format is available for this device
    if ([[ctx->video_output availableVideoCVPixelFormatTypes] indexOfObject:[NSNumber numberWithInt:pxl_fmt_spec.avf_id]] == NSNotFound) {
        av_log(s, AV_LOG_ERROR, "Selected pixel format (%s) is not supported by the input device.\n",
               av_get_pix_fmt_name(pxl_fmt_spec.ff_id));

        pxl_fmt_spec.ff_id = AV_PIX_FMT_NONE;

        av_log(s, AV_LOG_ERROR, "Supported pixel formats:\n");
        for (NSNumber *pxl_fmt in [ctx->video_output availableVideoCVPixelFormatTypes]) {
            struct AVFPixelFormatSpec pxl_fmt_dummy;
            pxl_fmt_dummy.ff_id = AV_PIX_FMT_NONE;
            for (int i = 0; avf_pixel_formats[i].ff_id != AV_PIX_FMT_NONE; i++) {
                if ([pxl_fmt intValue] == avf_pixel_formats[i].avf_id) {
                    pxl_fmt_dummy = avf_pixel_formats[i];
                    break;
                }
            }

            if (pxl_fmt_dummy.ff_id != AV_PIX_FMT_NONE) {
                av_log(s, AV_LOG_ERROR, " %s\n", av_get_pix_fmt_name(pxl_fmt_dummy.ff_id));

                // select first supported pixel format instead of user selected (or default) pixel format
                if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
                    pxl_fmt_spec = pxl_fmt_dummy;
                }
            }
        }

        // fail if there is no appropriate pixel format or print a warning about overriding the pixel format
        if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
            goto fail;
        } else {
            av_log(s, AV_LOG_WARNING, "Overriding selected pixel format to use %s instead.\n",
                   av_get_pix_fmt_name(pxl_fmt_spec.ff_id));
        }
    }

    NSNumber     *pixel_format = [NSNumber numberWithUnsignedInt:pxl_fmt_spec.avf_id];
    NSDictionary *capture_dict = [NSDictionary dictionaryWithObject:pixel_format
                                                              forKey:(id)kCVPixelBufferPixelFormatTypeKey];

    [ctx->video_output setVideoSettings:capture_dict];
    [ctx->video_output setAlwaysDiscardsLateVideoFrames:YES];

    ctx->avf_delegate = [[AVFFrameReceiver alloc] initWithContext:ctx];

    dispatch_queue_t queue = dispatch_queue_create("avf_queue", NULL);
    [ctx->video_output setSampleBufferDelegate:ctx->avf_delegate queue:queue];
    dispatch_release(queue);

    if ([ctx->capture_session canAddOutput:ctx->video_output]) {
        [ctx->capture_session addOutput:ctx->video_output];
    } else {
        av_log(s, AV_LOG_ERROR, "can't add video output to capture session\n");
        goto fail;
    }

    [ctx->capture_session startRunning];

    // Take stream info from the first frame.
    while (ctx->frames_captured < 1) {
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
    }

    lock_frames(ctx);

    AVStream* stream = avformat_new_stream(s, NULL);

    if (!stream) {
        unlock_frames(ctx);
        goto fail;
    }

    avpriv_set_pts_info(stream, 64, 1, avf_time_base);

    CVImageBufferRef image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);
    CGSize image_buffer_size      = CVImageBufferGetEncodedSize(image_buffer);

    stream->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    stream->codec->width      = (int)image_buffer_size.width;
    stream->codec->height     = (int)image_buffer_size.height;
    stream->codec->pix_fmt    = pxl_fmt_spec.ff_id;

    CFRelease(ctx->current_frame);
    ctx->current_frame = nil;

    unlock_frames(ctx);
    [pool release];
    return 0;

fail:
    [pool release];
    destroy_context(ctx);
    return AVERROR(EIO);
}

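/* read_packet: block until the delegate has published a frame, then copy the
 * raw pixel buffer into a new packet. Timestamps are wall-clock based; every
 * packet is an intra-coded raw video frame, so AV_PKT_FLAG_KEY is always set. */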
static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVFContext* ctx = (AVFContext*)s->priv_data;

    do {
        lock_frames(ctx);

        if (ctx->current_frame != nil) {
            CVImageBufferRef image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);

            if (av_new_packet(pkt, (int)CVPixelBufferGetDataSize(image_buffer)) < 0) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            pkt->pts = pkt->dts = av_rescale_q(av_gettime() - ctx->first_pts,
                                               AV_TIME_BASE_Q,
                                               avf_time_base_q);
            pkt->stream_index  = 0;
            pkt->flags        |= AV_PKT_FLAG_KEY;

            CVPixelBufferLockBaseAddress(image_buffer, 0);

            void* data = CVPixelBufferGetBaseAddress(image_buffer);
            memcpy(pkt->data, data, pkt->size);

            CVPixelBufferUnlockBaseAddress(image_buffer, 0);
            CFRelease(ctx->current_frame);
            ctx->current_frame = nil;
        } else {
            pkt->data = NULL;
            pthread_cond_wait(&ctx->frame_wait_cond, &ctx->frame_lock);
        }

        unlock_frames(ctx);
    } while (!pkt->data);

    return 0;
}

static int avf_close(AVFormatContext *s)
{
    AVFContext* ctx = (AVFContext*)s->priv_data;
    destroy_context(ctx);
    return 0;
}

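/* User-visible options, set on the command line before the -i argument, e.g.
 * "-video_device_index 1" or "-pixel_format nv12". Note that frame_rate is
 * currently only declared here; the capture setup above does not yet apply
 * it to the device. */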
static const AVOption options[] = {
    { "frame_rate", "set frame rate", offsetof(AVFContext, frame_rate), AV_OPT_TYPE_FLOAT, { .dbl = 30.0 }, 0.1, 30.0, AV_OPT_FLAG_DECODING_PARAM, NULL },
    { "list_devices", "list available devices", offsetof(AVFContext, list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    { "true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    { "false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    { "video_device_index", "select video device by index for devices with same name (starts at 0)", offsetof(AVFContext, video_device_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "pixel_format", "set pixel format", offsetof(AVFContext, pixel_format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_YUV420P}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass avf_class = {
    .class_name = "AVFoundation input device",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};

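/* Demuxer definition. AVFMT_NOFILE tells libavformat not to open the "file"
 * itself: the input string is interpreted above as a device index or a
 * device name prefix instead of a path. */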
AVInputFormat ff_avfoundation_demuxer = {
    .name           = "avfoundation",
    .long_name      = NULL_IF_CONFIG_SMALL("AVFoundation input device"),
    .priv_data_size = sizeof(AVFContext),
    .read_header    = avf_read_header,
    .read_packet    = avf_read_packet,
    .read_close     = avf_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &avf_class,
};