/*
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2006 Luca Abeni
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Video4Linux2 grab interface
 *
 * Part of this file is based on the V4L2 video capture example
 * (http://linuxtv.org/downloads/v4l-dvb-apis/capture-example.html)
 *
 * Thanks to Michael Niedermayer for providing the mapping between
 * V4L2_PIX_FMT_* and AV_PIX_FMT_*
 */

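/*
 * Minimal API-level sketch of opening this device through libavformat.
 * This is illustrative only: the device node, the option values and the
 * omitted error handling are assumptions, not part of this file.
 *
 *     av_register_all();
 *     AVInputFormat *ifmt = av_find_input_format("video4linux2");
 *     AVFormatContext *fmt_ctx = NULL;
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "video_size", "640x480", 0);
 *     av_dict_set(&opts, "framerate", "25", 0);
 *     avformat_open_input(&fmt_ctx, "/dev/video0", ifmt, &opts);
 */
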
#include "v4l2-common.h"

#if CONFIG_LIBV4L2
#include <libv4l2.h>
#endif

static const int desired_video_buffers = 256;

#define V4L_ALLFORMATS  3
#define V4L_RAWFORMATS  1
#define V4L_COMPFORMATS 2

/**
 * Return timestamps to the user exactly as returned by the kernel
 */
#define V4L_TS_DEFAULT  0
/**
 * Autodetect the kind of timestamps returned by the kernel and convert to
 * absolute (wall clock) timestamps.
 */
#define V4L_TS_ABS      1
/**
 * Assume kernel timestamps are from the monotonic clock and convert to
 * absolute timestamps.
 */
#define V4L_TS_MONO2ABS 2

/**
 * Once the kind of timestamps returned by the kernel has been detected,
 * the value of the timefilter (NULL or not) determines whether a conversion
 * takes place.
 */
#define V4L_TS_CONVERT_READY V4L_TS_DEFAULT

struct video_data {
    AVClass *class;
    int fd;
    int pixelformat; /* V4L2_PIX_FMT_* */
    int width, height;
    int frame_size;
    int interlaced;
    int top_field_first;
    int ts_mode;
    TimeFilter *timefilter;
    int64_t last_time_m;

    int buffers;
    volatile int buffers_queued;
    void **buf_start;
    unsigned int *buf_len;
    char *standard;
    v4l2_std_id std_id;
    int channel;
    char *pixel_format; /**< Set by a private option. */
    int list_format;    /**< Set by a private option. */
    int list_standard;  /**< Set by a private option. */
    char *framerate;    /**< Set by a private option. */

    int use_libv4l2;
    int (*open_f)(const char *file, int oflag, ...);
    int (*close_f)(int fd);
    int (*dup_f)(int fd);
    int (*ioctl_f)(int fd, unsigned long int request, ...);
    ssize_t (*read_f)(int fd, void *buffer, size_t n);
    void *(*mmap_f)(void *start, size_t length, int prot, int flags, int fd, int64_t offset);
    int (*munmap_f)(void *_start, size_t length);
};

struct buff_data {
    struct video_data *s;
    int index;
};

static int device_open(AVFormatContext *ctx)
{
    struct video_data *s = ctx->priv_data;
    struct v4l2_capability cap;
    int fd;
    int err;
    int flags = O_RDWR;

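    /*
     * The device I/O calls go through function pointers so that either the
     * plain libc syscalls or their libv4l2 wrappers (v4l2_open() etc.) can be
     * selected at run time via the use_libv4l2 option; the v4l2_* macros below
     * then redirect the rest of this file through those pointers.
     */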
#define SET_WRAPPERS(prefix) do {       \
    s->open_f   = prefix ## open;       \
    s->close_f  = prefix ## close;      \
    s->dup_f    = prefix ## dup;        \
    s->ioctl_f  = prefix ## ioctl;      \
    s->read_f   = prefix ## read;       \
    s->mmap_f   = prefix ## mmap;       \
    s->munmap_f = prefix ## munmap;     \
} while (0)

    if (s->use_libv4l2) {
#if CONFIG_LIBV4L2
        SET_WRAPPERS(v4l2_);
#else
        av_log(ctx, AV_LOG_ERROR, "libavdevice is not built with libv4l2 support.\n");
        return AVERROR(EINVAL);
#endif
    } else {
        SET_WRAPPERS();
    }

#define v4l2_open   s->open_f
#define v4l2_close  s->close_f
#define v4l2_dup    s->dup_f
#define v4l2_ioctl  s->ioctl_f
#define v4l2_read   s->read_f
#define v4l2_mmap   s->mmap_f
#define v4l2_munmap s->munmap_f

    if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
        flags |= O_NONBLOCK;
    }

    fd = v4l2_open(ctx->filename, flags, 0);
    if (fd < 0) {
        err = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "Cannot open video device %s: %s\n",
               ctx->filename, av_err2str(err));
        return err;
    }

    if (v4l2_ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
        err = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n",
               av_err2str(err));
        goto fail;
    }

    av_log(ctx, AV_LOG_VERBOSE, "fd:%d capabilities:%x\n",
           fd, cap.capabilities);

    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
        av_log(ctx, AV_LOG_ERROR, "Not a video capture device.\n");
        err = AVERROR(ENODEV);
        goto fail;
    }

    if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
        av_log(ctx, AV_LOG_ERROR,
               "The device does not support the streaming I/O method.\n");
        err = AVERROR(ENOSYS);
        goto fail;
    }

    return fd;

fail:
    v4l2_close(fd);
    return err;
}

static int device_init(AVFormatContext *ctx, int *width, int *height,
                       uint32_t pixelformat)
{
    struct video_data *s = ctx->priv_data;
    struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
    int res = 0;

    fmt.fmt.pix.width = *width;
    fmt.fmt.pix.height = *height;
    fmt.fmt.pix.pixelformat = pixelformat;
    fmt.fmt.pix.field = V4L2_FIELD_ANY;

    /* Some drivers will fail and return EINVAL when the pixelformat
       is not supported (even if type field is valid and supported) */
    if (v4l2_ioctl(s->fd, VIDIOC_S_FMT, &fmt) < 0)
        res = AVERROR(errno);

    if ((*width != fmt.fmt.pix.width) || (*height != fmt.fmt.pix.height)) {
        av_log(ctx, AV_LOG_INFO,
               "The V4L2 driver changed the video from %dx%d to %dx%d\n",
               *width, *height, fmt.fmt.pix.width, fmt.fmt.pix.height);
        *width = fmt.fmt.pix.width;
        *height = fmt.fmt.pix.height;
    }

    if (pixelformat != fmt.fmt.pix.pixelformat) {
        av_log(ctx, AV_LOG_DEBUG,
               "The V4L2 driver changed the pixel format "
               "from 0x%08X to 0x%08X\n",
               pixelformat, fmt.fmt.pix.pixelformat);
        res = AVERROR(EINVAL);
    }

    if (fmt.fmt.pix.field == V4L2_FIELD_INTERLACED) {
        av_log(ctx, AV_LOG_DEBUG,
               "The V4L2 driver is using the interlaced mode\n");
        s->interlaced = 1;
    }

    return res;
}

static int first_field(const struct video_data *s)
{
    int res;
    v4l2_std_id std;

    res = v4l2_ioctl(s->fd, VIDIOC_G_STD, &std);
    if (res < 0)
        return 0;
    if (std & V4L2_STD_NTSC)
        return 0;

    return 1;
}

#if HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE
static void list_framesizes(AVFormatContext *ctx, uint32_t pixelformat)
{
    const struct video_data *s = ctx->priv_data;
    struct v4l2_frmsizeenum vfse = { .pixel_format = pixelformat };

    while(!v4l2_ioctl(s->fd, VIDIOC_ENUM_FRAMESIZES, &vfse)) {
        switch (vfse.type) {
        case V4L2_FRMSIZE_TYPE_DISCRETE:
            av_log(ctx, AV_LOG_INFO, " %ux%u",
                   vfse.discrete.width, vfse.discrete.height);
            break;
        case V4L2_FRMSIZE_TYPE_CONTINUOUS:
        case V4L2_FRMSIZE_TYPE_STEPWISE:
            av_log(ctx, AV_LOG_INFO, " {%u-%u, %u}x{%u-%u, %u}",
                   vfse.stepwise.min_width,
                   vfse.stepwise.max_width,
                   vfse.stepwise.step_width,
                   vfse.stepwise.min_height,
                   vfse.stepwise.max_height,
                   vfse.stepwise.step_height);
        }
        vfse.index++;
    }
}
#endif

static void list_formats(AVFormatContext *ctx, int type)
{
    const struct video_data *s = ctx->priv_data;
    struct v4l2_fmtdesc vfd = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

    while(!v4l2_ioctl(s->fd, VIDIOC_ENUM_FMT, &vfd)) {
        enum AVCodecID codec_id = ff_fmt_v4l2codec(vfd.pixelformat);
        enum AVPixelFormat pix_fmt = ff_fmt_v4l2ff(vfd.pixelformat, codec_id);

        vfd.index++;

        if (!(vfd.flags & V4L2_FMT_FLAG_COMPRESSED) &&
            type & V4L_RAWFORMATS) {
            const char *fmt_name = av_get_pix_fmt_name(pix_fmt);
            av_log(ctx, AV_LOG_INFO, "Raw       : %9s : %20s :",
                   fmt_name ? fmt_name : "Unsupported",
                   vfd.description);
        } else if (vfd.flags & V4L2_FMT_FLAG_COMPRESSED &&
                   type & V4L_COMPFORMATS) {
            AVCodec *codec = avcodec_find_decoder(codec_id);
            av_log(ctx, AV_LOG_INFO, "Compressed: %9s : %20s :",
                   codec ? codec->name : "Unsupported",
                   vfd.description);
        } else {
            continue;
        }

#ifdef V4L2_FMT_FLAG_EMULATED
        if (vfd.flags & V4L2_FMT_FLAG_EMULATED)
            av_log(ctx, AV_LOG_INFO, " Emulated :");
#endif
#if HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE
        list_framesizes(ctx, vfd.pixelformat);
#endif
        av_log(ctx, AV_LOG_INFO, "\n");
    }
}

static void list_standards(AVFormatContext *ctx)
{
    int ret;
    struct video_data *s = ctx->priv_data;
    struct v4l2_standard standard;

    if (s->std_id == 0)
        return;

    for (standard.index = 0; ; standard.index++) {
        if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
            ret = AVERROR(errno);
            if (ret == AVERROR(EINVAL)) {
                break;
            } else {
                av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
                return;
            }
        }
        av_log(ctx, AV_LOG_INFO, "%2d, %16"PRIx64", %s\n",
               standard.index, (uint64_t)standard.id, standard.name);
    }
}

static int mmap_init(AVFormatContext *ctx)
{
    int i, res;
    struct video_data *s = ctx->priv_data;
    struct v4l2_requestbuffers req = {
        .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .count  = desired_video_buffers,
        .memory = V4L2_MEMORY_MMAP
    };

    if (v4l2_ioctl(s->fd, VIDIOC_REQBUFS, &req) < 0) {
        res = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS): %s\n", av_err2str(res));
        return res;
    }

    if (req.count < 2) {
        av_log(ctx, AV_LOG_ERROR, "Insufficient buffer memory\n");
        return AVERROR(ENOMEM);
    }
    s->buffers = req.count;
    s->buf_start = av_malloc_array(s->buffers, sizeof(void *));
    if (!s->buf_start) {
        av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
        return AVERROR(ENOMEM);
    }
    s->buf_len = av_malloc_array(s->buffers, sizeof(unsigned int));
    if (!s->buf_len) {
        av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
        av_free(s->buf_start);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < req.count; i++) {
        struct v4l2_buffer buf = {
            .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .index  = i,
            .memory = V4L2_MEMORY_MMAP
        };
        if (v4l2_ioctl(s->fd, VIDIOC_QUERYBUF, &buf) < 0) {
            res = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF): %s\n", av_err2str(res));
            return res;
        }

        s->buf_len[i] = buf.length;
        if (s->frame_size > 0 && s->buf_len[i] < s->frame_size) {
            av_log(ctx, AV_LOG_ERROR,
                   "buf_len[%d] = %d < expected frame size %d\n",
                   i, s->buf_len[i], s->frame_size);
            return AVERROR(ENOMEM);
        }
        s->buf_start[i] = v4l2_mmap(NULL, buf.length,
                                    PROT_READ | PROT_WRITE, MAP_SHARED,
                                    s->fd, buf.m.offset);

        if (s->buf_start[i] == MAP_FAILED) {
            res = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", av_err2str(res));
            return res;
        }
    }

    return 0;
}

#if FF_API_DESTRUCT_PACKET
static void dummy_release_buffer(AVPacket *pkt)
{
    av_assert0(0);
}
#endif

static int enqueue_buffer(struct video_data *s, struct v4l2_buffer *buf)
{
    int res = 0;

    if (v4l2_ioctl(s->fd, VIDIOC_QBUF, buf) < 0) {
        res = AVERROR(errno);
        av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", av_err2str(res));
    } else {
        avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
    }

    return res;
}

static void mmap_release_buffer(void *opaque, uint8_t *data)
{
    struct v4l2_buffer buf = { 0 };
    struct buff_data *buf_descriptor = opaque;
    struct video_data *s = buf_descriptor->s;

    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    buf.index = buf_descriptor->index;
    av_free(buf_descriptor);

    enqueue_buffer(s, &buf);
}

#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
static int64_t av_gettime_monotonic(void)
{
    return av_gettime_relative();
}
#endif

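/*
 * Heuristic used by init_convert_timestamp(): a kernel timestamp falling
 * within [now - 10s, now + 1s] of av_gettime() is treated as already absolute;
 * otherwise, if it matches the monotonic clock (or mono2abs was requested), a
 * timefilter is created to map monotonic timestamps onto the wall clock.
 */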
static int init_convert_timestamp(AVFormatContext *ctx, int64_t ts)
{
    struct video_data *s = ctx->priv_data;
    int64_t now;

    now = av_gettime();
    if (s->ts_mode == V4L_TS_ABS &&
        ts <= now + 1 * AV_TIME_BASE && ts >= now - 10 * AV_TIME_BASE) {
        av_log(ctx, AV_LOG_INFO, "Detected absolute timestamps\n");
        s->ts_mode = V4L_TS_CONVERT_READY;
        return 0;
    }
#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
    if (ctx->streams[0]->avg_frame_rate.num) {
        now = av_gettime_monotonic();
        if (s->ts_mode == V4L_TS_MONO2ABS ||
            (ts <= now + 1 * AV_TIME_BASE && ts >= now - 10 * AV_TIME_BASE)) {
            AVRational tb = {AV_TIME_BASE, 1};
            int64_t period = av_rescale_q(1, tb, ctx->streams[0]->avg_frame_rate);
            av_log(ctx, AV_LOG_INFO, "Detected monotonic timestamps, converting\n");
            /* microseconds instead of seconds, MHz instead of Hz */
            s->timefilter = ff_timefilter_new(1, period, 1.0E-6);
            if (!s->timefilter)
                return AVERROR(ENOMEM);
            s->ts_mode = V4L_TS_CONVERT_READY;
            return 0;
        }
    }
#endif
    av_log(ctx, AV_LOG_ERROR, "Unknown timestamps\n");
    return AVERROR(EIO);
}

static int convert_timestamp(AVFormatContext *ctx, int64_t *ts)
{
    struct video_data *s = ctx->priv_data;

    if (s->ts_mode) {
        int r = init_convert_timestamp(ctx, *ts);
        if (r < 0)
            return r;
    }
#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
    if (s->timefilter) {
        int64_t nowa = av_gettime();
        int64_t nowm = av_gettime_monotonic();
        ff_timefilter_update(s->timefilter, nowa, nowm - s->last_time_m);
        s->last_time_m = nowm;
        *ts = ff_timefilter_eval(s->timefilter, *ts - nowm);
    }
#endif
    return 0;
}

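/*
 * mmap_read_frame() normally returns frames zero-copy: the dequeued mmap'ed
 * buffer is wrapped in an AVBufferRef whose free callback (mmap_release_buffer)
 * re-queues it to the driver. When the number of queued buffers drops to
 * buffers/8 (at least 1), the data is copied into a fresh packet instead and
 * the buffer is re-queued immediately, so the driver never starves.
 */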
static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
{
    struct video_data *s = ctx->priv_data;
    struct v4l2_buffer buf = {
        .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .memory = V4L2_MEMORY_MMAP
    };
    int res;

    /* FIXME: Some special treatment might be needed in case of loss of signal... */
    while ((res = v4l2_ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 && (errno == EINTR));
    if (res < 0) {
        if (errno == EAGAIN) {
            pkt->size = 0;
            return AVERROR(EAGAIN);
        }
        res = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n",
               av_err2str(res));
        return res;
    }

    if (buf.index >= s->buffers) {
        av_log(ctx, AV_LOG_ERROR, "Invalid buffer index received.\n");
        return AVERROR(EINVAL);
    }
    avpriv_atomic_int_add_and_fetch(&s->buffers_queued, -1);
    // always keep at least one buffer queued
    av_assert0(avpriv_atomic_int_get(&s->buffers_queued) >= 1);

    /* CPIA is a compressed format and we don't know the exact number of bytes
     * used by a frame, so set it here as the driver announces it.
     */
    if (ctx->video_codec_id == AV_CODEC_ID_CPIA)
        s->frame_size = buf.bytesused;

    if (s->frame_size > 0 && buf.bytesused != s->frame_size) {
        av_log(ctx, AV_LOG_ERROR,
               "The v4l2 frame is %d bytes, but %d bytes are expected\n",
               buf.bytesused, s->frame_size);
        enqueue_buffer(s, &buf);
        return AVERROR_INVALIDDATA;
    }

    /* Image is at s->buff_start[buf.index] */
    if (avpriv_atomic_int_get(&s->buffers_queued) == FFMAX(s->buffers / 8, 1)) {
        /* when we start getting low on queued buffers, fall back on copying data */
        res = av_new_packet(pkt, buf.bytesused);
        if (res < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error allocating a packet.\n");
            enqueue_buffer(s, &buf);
            return res;
        }
        memcpy(pkt->data, s->buf_start[buf.index], buf.bytesused);

        res = enqueue_buffer(s, &buf);
        if (res) {
            av_free_packet(pkt);
            return res;
        }
    } else {
        struct buff_data *buf_descriptor;

        pkt->data = s->buf_start[buf.index];
        pkt->size = buf.bytesused;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
        pkt->destruct = dummy_release_buffer;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        buf_descriptor = av_malloc(sizeof(struct buff_data));
        if (!buf_descriptor) {
            /* Something went wrong... Since av_malloc() failed, we cannot even
             * allocate a buffer for memcpying into it
             */
            av_log(ctx, AV_LOG_ERROR, "Failed to allocate a buffer descriptor\n");
            enqueue_buffer(s, &buf);

            return AVERROR(ENOMEM);
        }
        buf_descriptor->index = buf.index;
        buf_descriptor->s = s;

        pkt->buf = av_buffer_create(pkt->data, pkt->size, mmap_release_buffer,
                                    buf_descriptor, 0);
        if (!pkt->buf) {
            av_log(ctx, AV_LOG_ERROR, "Failed to create a buffer\n");
            enqueue_buffer(s, &buf);
            av_freep(&buf_descriptor);
            return AVERROR(ENOMEM);
        }
    }
    pkt->pts = buf.timestamp.tv_sec * INT64_C(1000000) + buf.timestamp.tv_usec;
    convert_timestamp(ctx, &pkt->pts);

    return s->buf_len[buf.index];
}

static int mmap_start(AVFormatContext *ctx)
{
    struct video_data *s = ctx->priv_data;
    enum v4l2_buf_type type;
    int i, res;

    for (i = 0; i < s->buffers; i++) {
        struct v4l2_buffer buf = {
            .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .index  = i,
            .memory = V4L2_MEMORY_MMAP
        };

        if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) < 0) {
            res = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n",
                   av_err2str(res));
            return res;
        }
    }
    s->buffers_queued = s->buffers;

    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (v4l2_ioctl(s->fd, VIDIOC_STREAMON, &type) < 0) {
        res = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n",
               av_err2str(res));
        return res;
    }

    return 0;
}

static void mmap_close(struct video_data *s)
{
    enum v4l2_buf_type type;
    int i;

    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    /* We do not check for the result, because we could
     * not do anything about it anyway...
     */
    v4l2_ioctl(s->fd, VIDIOC_STREAMOFF, &type);
    for (i = 0; i < s->buffers; i++) {
        v4l2_munmap(s->buf_start[i], s->buf_len[i]);
    }
    av_free(s->buf_start);
    av_free(s->buf_len);
}

static int v4l2_set_parameters(AVFormatContext *ctx)
{
    struct video_data *s = ctx->priv_data;
    struct v4l2_standard standard = { 0 };
    struct v4l2_streamparm streamparm = { 0 };
    struct v4l2_fract *tpf;
    AVRational framerate_q = { 0 };
    int i, ret;

    if (s->framerate &&
        (ret = av_parse_video_rate(&framerate_q, s->framerate)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Could not parse framerate '%s'.\n",
               s->framerate);
        return ret;
    }

    if (s->standard) {
        if (s->std_id) {
            ret = 0;
            av_log(ctx, AV_LOG_DEBUG, "Setting standard: %s\n", s->standard);
            /* set tv standard */
            for (i = 0; ; i++) {
                standard.index = i;
                if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
                    ret = AVERROR(errno);
                    break;
                }
                if (!av_strcasecmp(standard.name, s->standard))
                    break;
            }
            if (ret < 0) {
                av_log(ctx, AV_LOG_ERROR, "Unknown or unsupported standard '%s'\n", s->standard);
                return ret;
            }

            if (v4l2_ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
                ret = AVERROR(errno);
                av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_S_STD): %s\n", av_err2str(ret));
                return ret;
            }
        } else {
            av_log(ctx, AV_LOG_WARNING,
                   "This device does not support any standard\n");
        }
    }

    /* get standard */
    if (v4l2_ioctl(s->fd, VIDIOC_G_STD, &s->std_id) == 0) {
        tpf = &standard.frameperiod;
        for (i = 0; ; i++) {
            standard.index = i;
            if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
                ret = AVERROR(errno);
                if (ret == AVERROR(EINVAL)
#ifdef ENODATA
                    || ret == AVERROR(ENODATA)
#endif
                ) {
                    tpf = &streamparm.parm.capture.timeperframe;
                    break;
                }
                av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
                return ret;
            }
            if (standard.id == s->std_id) {
                av_log(ctx, AV_LOG_DEBUG,
                       "Current standard: %s, id: %"PRIx64", frameperiod: %d/%d\n",
                       standard.name, (uint64_t)standard.id, tpf->numerator, tpf->denominator);
                break;
            }
        }
    } else {
        tpf = &streamparm.parm.capture.timeperframe;
    }

    streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (v4l2_ioctl(s->fd, VIDIOC_G_PARM, &streamparm) < 0) {
        ret = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_G_PARM): %s\n", av_err2str(ret));
        return ret;
    }

    if (framerate_q.num && framerate_q.den) {
        if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
            tpf = &streamparm.parm.capture.timeperframe;

            av_log(ctx, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n",
                   framerate_q.den, framerate_q.num);
            tpf->numerator   = framerate_q.den;
            tpf->denominator = framerate_q.num;

            if (v4l2_ioctl(s->fd, VIDIOC_S_PARM, &streamparm) < 0) {
                ret = AVERROR(errno);
                av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_S_PARM): %s\n",
                       av_err2str(ret));
                return ret;
            }

            if (framerate_q.num != tpf->denominator ||
                framerate_q.den != tpf->numerator) {
                av_log(ctx, AV_LOG_INFO,
                       "The driver changed the time per frame from "
                       "%d/%d to %d/%d\n",
                       framerate_q.den, framerate_q.num,
                       tpf->numerator, tpf->denominator);
            }
        } else {
            av_log(ctx, AV_LOG_WARNING,
                   "The driver does not allow changing the time per frame\n");
        }
    }
    if (tpf->denominator > 0 && tpf->numerator > 0) {
        ctx->streams[0]->avg_frame_rate.num = tpf->denominator;
        ctx->streams[0]->avg_frame_rate.den = tpf->numerator;
        ctx->streams[0]->r_frame_rate = ctx->streams[0]->avg_frame_rate;
    } else
        av_log(ctx, AV_LOG_WARNING, "Time per frame unknown\n");

    return 0;
}

static int device_try_init(AVFormatContext *ctx,
                           enum AVPixelFormat pix_fmt,
                           int *width,
                           int *height,
                           uint32_t *desired_format,
                           enum AVCodecID *codec_id)
{
    int ret, i;

    *desired_format = ff_fmt_ff2v4l(pix_fmt, ctx->video_codec_id);

    if (*desired_format) {
        ret = device_init(ctx, width, height, *desired_format);
        if (ret < 0) {
            *desired_format = 0;
            if (ret != AVERROR(EINVAL))
                return ret;
        }
    }

    if (!*desired_format) {
        for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
            if (ctx->video_codec_id == AV_CODEC_ID_NONE ||
                ff_fmt_conversion_table[i].codec_id == ctx->video_codec_id) {
                av_log(ctx, AV_LOG_DEBUG, "Trying to set codec:%s pix_fmt:%s\n",
                       avcodec_get_name(ff_fmt_conversion_table[i].codec_id),
                       (char *)av_x_if_null(av_get_pix_fmt_name(ff_fmt_conversion_table[i].ff_fmt), "none"));

                *desired_format = ff_fmt_conversion_table[i].v4l2_fmt;
                ret = device_init(ctx, width, height, *desired_format);
                if (ret >= 0)
                    break;
                else if (ret != AVERROR(EINVAL))
                    return ret;
                *desired_format = 0;
            }
        }

        if (*desired_format == 0) {
            av_log(ctx, AV_LOG_ERROR, "Cannot find a proper format for "
                   "codec '%s' (id %d), pixel format '%s' (id %d)\n",
                   avcodec_get_name(ctx->video_codec_id), ctx->video_codec_id,
                   (char *)av_x_if_null(av_get_pix_fmt_name(pix_fmt), "none"), pix_fmt);
            ret = AVERROR(EINVAL);
        }
    }

    *codec_id = ff_fmt_v4l2codec(*desired_format);
    av_assert0(*codec_id != AV_CODEC_ID_NONE);
    return ret;
}

static int v4l2_read_probe(AVProbeData *p)
{
    if (av_strstart(p->filename, "/dev/video", NULL))
        return AVPROBE_SCORE_MAX - 1;
    return 0;
}

static int v4l2_read_header(AVFormatContext *ctx)
{
    struct video_data *s = ctx->priv_data;
    AVStream *st;
    int res = 0;
    uint32_t desired_format;
    enum AVCodecID codec_id = AV_CODEC_ID_NONE;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
    struct v4l2_input input = { 0 };

    st = avformat_new_stream(ctx, NULL);
    if (!st)
        return AVERROR(ENOMEM);

#if CONFIG_LIBV4L2
    /* silence libv4l2 logging. if fopen() fails v4l2_log_file will be NULL
       and errors will get sent to stderr */
    if (s->use_libv4l2)
        v4l2_log_file = fopen("/dev/null", "w");
#endif

    s->fd = device_open(ctx);
    if (s->fd < 0)
        return s->fd;

    if (s->channel != -1) {
        /* set video input */
        av_log(ctx, AV_LOG_DEBUG, "Selecting input_channel: %d\n", s->channel);
        if (v4l2_ioctl(s->fd, VIDIOC_S_INPUT, &s->channel) < 0) {
            res = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_S_INPUT): %s\n", av_err2str(res));
            goto fail;
        }
    } else {
        /* get current video input */
        if (v4l2_ioctl(s->fd, VIDIOC_G_INPUT, &s->channel) < 0) {
            res = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_G_INPUT): %s\n", av_err2str(res));
            goto fail;
        }
    }

    /* enum input */
    input.index = s->channel;
    if (v4l2_ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
        res = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMINPUT): %s\n", av_err2str(res));
        goto fail;
    }
    s->std_id = input.std;
    av_log(ctx, AV_LOG_DEBUG, "Current input_channel: %d, input_name: %s, input_std: %"PRIx64"\n",
           s->channel, input.name, (uint64_t)input.std);

    if (s->list_format) {
        list_formats(ctx, s->list_format);
        res = AVERROR_EXIT;
        goto fail;
    }

    if (s->list_standard) {
        list_standards(ctx);
        res = AVERROR_EXIT;
        goto fail;
    }

    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    if (s->pixel_format) {
        AVCodec *codec = avcodec_find_decoder_by_name(s->pixel_format);

        if (codec)
            ctx->video_codec_id = codec->id;

        pix_fmt = av_get_pix_fmt(s->pixel_format);

        if (pix_fmt == AV_PIX_FMT_NONE && !codec) {
            av_log(ctx, AV_LOG_ERROR, "No such input format: %s.\n",
                   s->pixel_format);

            res = AVERROR(EINVAL);
            goto fail;
        }
    }

    if (!s->width && !s->height) {
        struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

        av_log(ctx, AV_LOG_VERBOSE,
               "Querying the device for the current frame size\n");
        if (v4l2_ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
            res = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n",
                   av_err2str(res));
            goto fail;
        }

        s->width  = fmt.fmt.pix.width;
        s->height = fmt.fmt.pix.height;
        av_log(ctx, AV_LOG_VERBOSE,
               "Setting frame size to %dx%d\n", s->width, s->height);
    }

    res = device_try_init(ctx, pix_fmt, &s->width, &s->height, &desired_format, &codec_id);
    if (res < 0)
        goto fail;

    /* If no pixel_format was specified, the codec_id was not known up
     * until now. Set video_codec_id in the context, as codec_id will
     * not be available outside this function
     */
    if (codec_id != AV_CODEC_ID_NONE && ctx->video_codec_id == AV_CODEC_ID_NONE)
        ctx->video_codec_id = codec_id;

    if ((res = av_image_check_size(s->width, s->height, 0, ctx)) < 0)
        goto fail;

    s->pixelformat = desired_format;

    if ((res = v4l2_set_parameters(ctx)) < 0)
        goto fail;

    st->codec->pix_fmt = ff_fmt_v4l2ff(desired_format, codec_id);
    s->frame_size =
        avpicture_get_size(st->codec->pix_fmt, s->width, s->height);

    if ((res = mmap_init(ctx)) ||
        (res = mmap_start(ctx)) < 0)
        goto fail;

    s->top_field_first = first_field(s);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = codec_id;
    if (codec_id == AV_CODEC_ID_RAWVIDEO)
        st->codec->codec_tag =
            avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
    else if (codec_id == AV_CODEC_ID_H264) {
        st->need_parsing = AVSTREAM_PARSE_HEADERS;
    }
    if (desired_format == V4L2_PIX_FMT_YVU420)
        st->codec->codec_tag = MKTAG('Y', 'V', '1', '2');
    else if (desired_format == V4L2_PIX_FMT_YVU410)
        st->codec->codec_tag = MKTAG('Y', 'V', 'U', '9');
    st->codec->width  = s->width;
    st->codec->height = s->height;
    if (st->avg_frame_rate.den)
        st->codec->bit_rate = s->frame_size * av_q2d(st->avg_frame_rate) * 8;

    return 0;

fail:
    v4l2_close(s->fd);
    return res;
}

static int v4l2_read_packet(AVFormatContext *ctx, AVPacket *pkt)
{
    struct video_data *s = ctx->priv_data;
    AVFrame *frame = ctx->streams[0]->codec->coded_frame;
    int res;

    av_init_packet(pkt);
    if ((res = mmap_read_frame(ctx, pkt)) < 0) {
        return res;
    }

    if (frame && s->interlaced) {
        frame->interlaced_frame = 1;
        frame->top_field_first = s->top_field_first;
    }

    return pkt->size;
}

static int v4l2_read_close(AVFormatContext *ctx)
{
    struct video_data *s = ctx->priv_data;

    if (avpriv_atomic_int_get(&s->buffers_queued) != s->buffers)
        av_log(ctx, AV_LOG_WARNING, "Some buffers are still owned by the caller on "
               "close.\n");

    mmap_close(s);

    v4l2_close(s->fd);
    return 0;
}

#define OFFSET(x) offsetof(struct video_data, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    { "standard", "set TV standard, used only by analog frame grabber", OFFSET(standard), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC },
    { "channel", "set TV channel, used only by frame grabber", OFFSET(channel), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, INT_MAX, DEC },
    { "video_size", "set frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
    { "pixel_format", "set preferred pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
    { "input_format", "set preferred pixel format (for raw video) or codec name", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
    { "framerate", "set frame rate", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },

    { "list_formats", "list available formats and exit", OFFSET(list_format), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC, "list_formats" },
    { "all", "show all available formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_ALLFORMATS }, 0, INT_MAX, DEC, "list_formats" },
    { "raw", "show only non-compressed formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_RAWFORMATS }, 0, INT_MAX, DEC, "list_formats" },
    { "compressed", "show only compressed formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_COMPFORMATS }, 0, INT_MAX, DEC, "list_formats" },

    { "list_standards", "list supported standards and exit", OFFSET(list_standard), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, DEC, "list_standards" },
    { "all", "show all supported standards", OFFSET(list_standard), AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, DEC, "list_standards" },

    { "timestamps", "set type of timestamps for grabbed frames", OFFSET(ts_mode), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 2, DEC, "timestamps" },
    { "ts", "set type of timestamps for grabbed frames", OFFSET(ts_mode), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 2, DEC, "timestamps" },
    { "default", "use timestamps from the kernel", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_DEFAULT }, 0, 2, DEC, "timestamps" },
    { "abs", "use absolute timestamps (wall clock)", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_ABS }, 0, 2, DEC, "timestamps" },
    { "mono2abs", "force conversion from monotonic to absolute timestamps", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_MONO2ABS }, 0, 2, DEC, "timestamps" },
    { "use_libv4l2", "use libv4l2 (v4l-utils) conversion functions", OFFSET(use_libv4l2), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
    { NULL },
};
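
/*
 * Example command lines exercising the options above. The device node
 * /dev/video0 and the sizes are placeholders; substitute whatever the
 * hardware actually offers.
 *
 *     ffmpeg -f v4l2 -list_formats all -i /dev/video0
 *     ffmpeg -f v4l2 -framerate 25 -video_size 640x480 -i /dev/video0 out.mkv
 *     ffmpeg -f v4l2 -ts mono2abs -input_format mjpeg -i /dev/video0 out.mkv
 */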

static const AVClass v4l2_class = {
    .class_name = "V4L2 indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};

AVInputFormat ff_v4l2_demuxer = {
    .name           = "video4linux2,v4l2",
    .long_name      = NULL_IF_CONFIG_SMALL("Video4Linux2 device grab"),
    .priv_data_size = sizeof(struct video_data),
    .read_probe     = v4l2_read_probe,
    .read_header    = v4l2_read_header,
    .read_packet    = v4l2_read_packet,
    .read_close     = v4l2_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &v4l2_class,
};