aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/media/v4l2-core
diff options
context:
space:
mode:
authorMauro Carvalho Chehab <mchehab@redhat.com>2012-06-14 15:35:52 -0400
committerMauro Carvalho Chehab <mchehab@redhat.com>2012-08-13 22:02:38 -0400
commit5bc3cb743bbab408792c1b4ef31adf6268aa4b7e (patch)
tree94faf3260c80a9626b450a6472780828cdf03b26 /drivers/media/v4l2-core
parent2ea4b442589b30210a166b9630c2547ebbe2cb82 (diff)
[media] v4l: move v4l2 core into a separate directory
Currently, the v4l2 core is mixed together with other non-core drivers. Move them into a separate directory. Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--drivers/media/v4l2-core/Kconfig60
-rw-r--r--drivers/media/v4l2-core/Makefile35
-rw-r--r--drivers/media/v4l2-core/tuner-core.c1354
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c623
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c1045
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2651
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c1003
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c280
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c313
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c120
-rw-r--r--drivers/media/v4l2-core/v4l2-int-device.c164
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c2324
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c647
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c470
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c1189
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c510
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c633
-rw-r--r--drivers/media/v4l2-core/videobuf-dvb.c398
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c349
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c2380
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c186
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c283
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c227
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c223
24 files changed, 17467 insertions, 0 deletions
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
new file mode 100644
index 000000000000..6f53337c4b4f
--- /dev/null
+++ b/drivers/media/v4l2-core/Kconfig
@@ -0,0 +1,60 @@
#
# Generic video config states
#
# All symbols below are invisible (no prompt text): they are enabled via
# "select"/"depends on" from the individual driver Kconfig entries.
#

config VIDEO_V4L2
	tristate
	depends on VIDEO_DEV && VIDEO_V4L2_COMMON
	default y

# videobuf (version 1) helper library and its memory backends
config VIDEOBUF_GEN
	tristate

config VIDEOBUF_DMA_SG
	depends on HAS_DMA
	select VIDEOBUF_GEN
	tristate

config VIDEOBUF_VMALLOC
	select VIDEOBUF_GEN
	tristate

config VIDEOBUF_DMA_CONTIG
	depends on HAS_DMA
	select VIDEOBUF_GEN
	tristate

config VIDEOBUF_DVB
	tristate
	select VIDEOBUF_GEN

config VIDEO_TUNER
	tristate
	depends on MEDIA_TUNER

config V4L2_MEM2MEM_DEV
	tristate
	depends on VIDEOBUF2_CORE

# videobuf2 helper library and its memory backends
config VIDEOBUF2_CORE
	tristate

config VIDEOBUF2_MEMOPS
	tristate

config VIDEOBUF2_DMA_CONTIG
	select VIDEOBUF2_CORE
	select VIDEOBUF2_MEMOPS
	tristate

config VIDEOBUF2_VMALLOC
	select VIDEOBUF2_CORE
	select VIDEOBUF2_MEMOPS
	tristate

config VIDEOBUF2_DMA_SG
	# NOTE(review): HAS_DMA dependency is commented out here, unlike
	# VIDEOBUF_DMA_SG above — confirm whether this is intentional
	#depends on HAS_DMA
	select VIDEOBUF2_CORE
	select VIDEOBUF2_MEMOPS
	tristate
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
new file mode 100644
index 000000000000..7319c27e256b
--- /dev/null
+++ b/drivers/media/v4l2-core/Makefile
@@ -0,0 +1,35 @@
#
# Makefile for the V4L2 core
#

# tuner.ko: the analog tuner core module
tuner-objs := tuner-core.o

# videodev.ko: the main V4L2 core module
videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
		v4l2-event.o v4l2-ctrls.o v4l2-subdev.o
# 32-bit compat ioctl translation is only built on CONFIG_COMPAT kernels
ifeq ($(CONFIG_COMPAT),y)
  videodev-objs += v4l2-compat-ioctl32.o
endif

obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-int-device.o
obj-$(CONFIG_VIDEO_V4L2_COMMON) += v4l2-common.o

obj-$(CONFIG_VIDEO_TUNER) += tuner.o

obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o

# videobuf (version 1) helpers
obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o

# videobuf2 helpers
obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o
obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o

# Headers shared with the DVB core and the tuner modules
ccflags-y += -I$(srctree)/drivers/media/dvb/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb/frontends
ccflags-y += -I$(srctree)/drivers/media/common/tuners
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
new file mode 100644
index 000000000000..b5a819af2b8c
--- /dev/null
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -0,0 +1,1354 @@
/*
 * i2c tv tuner chip device driver
 * driver core, i.e. kernel interfaces, registering and so on
 *
 * Copyright(c) by Ralph Metzler, Gerd Knorr, Gunther Mayer
 *
 * Copyright(c) 2005-2011 by Mauro Carvalho Chehab
 *	- Added support for a separate Radio tuner
 *	- Major rework and cleanups at the code
 *
 * This driver supports many devices and the idea is to let the driver
 * detect which device is present. So rather than listing all supported
 * devices here, we pretend to support a single, fake device type that will
 * handle both radio and analog TV tuning.
 */
16
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/i2c.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/videodev2.h>
#include <media/tuner.h>
#include <media/tuner-types.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
/* Attach headers for the tuner/demod modules this driver can probe */
#include "mt20xx.h"
#include "tda8290.h"
#include "tea5761.h"
#include "tea5767.h"
#include "tuner-xc2028.h"
#include "tuner-simple.h"
#include "tda9887.h"
#include "xc5000.h"
#include "tda18271.h"
#include "xc4000.h"

/* Marker for "not configured yet" (tuner type, module option, ...) */
#define UNSET (-1U)

/* Log prefix for the tuner_* macros; expects a local 't' (struct tuner *) */
#define PREFIX (t->i2c->driver->driver.name)
/*
 * Driver modprobe parameters
 */

/* insmod options used at init time => read/only */
static unsigned int addr;		/* NOTE(review): unused in this chunk; presumably forces a tuner i2c address — confirm */
static unsigned int no_autodetect;	/* non-zero: skip i2c-address-based autodetection in tuner_probe() */
static unsigned int show_i2c;		/* non-zero: dump a raw i2c read of the chip at probe time */

module_param(addr, int, 0444);
module_param(no_autodetect, int, 0444);
module_param(show_i2c, int, 0444);

/* insmod options used at runtime => read/write */
static int tuner_debug;			/* enables the tuner_dbg() messages */

/* Allowed tuning ranges, in MHz; used by set_tv_freq()/set_radio_freq() clamping */
static unsigned int tv_range[2] = { 44, 958 };
static unsigned int radio_range[2] = { 65, 108 };

/* Optional standard overrides, parsed by tuner_fixup_std() ("-" = no override) */
static char pal[] = "--";
static char secam[] = "--";
static char ntsc[] = "-";

module_param_named(debug, tuner_debug, int, 0644);
module_param_array(tv_range, int, NULL, 0644);
module_param_array(radio_range, int, NULL, 0644);
module_param_string(pal, pal, sizeof(pal), 0644);
module_param_string(secam, secam, sizeof(secam), 0644);
module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);

/*
 * Static vars
 */

/* Every tuner instance handled by this driver; scanned by tuner_lookup() */
static LIST_HEAD(tuner_list);
/* v4l2 subdev ops table, defined later in this file */
static const struct v4l2_subdev_ops tuner_ops;
82
/*
 * Debug macros
 *
 * All of them print with a "<driver> <adapter>-<addr>:" prefix and require
 * a local variable 't' (struct tuner *) to be in scope (see PREFIX).
 * tuner_dbg() only prints when the "debug" modprobe parameter is set.
 */

#define tuner_warn(fmt, arg...) do {					\
	printk(KERN_WARNING "%s %d-%04x: " fmt, PREFIX,			\
	       i2c_adapter_id(t->i2c->adapter),				\
	       t->i2c->addr, ##arg);					\
	 } while (0)

#define tuner_info(fmt, arg...) do {					\
	printk(KERN_INFO "%s %d-%04x: " fmt, PREFIX,			\
	       i2c_adapter_id(t->i2c->adapter),				\
	       t->i2c->addr, ##arg);					\
	 } while (0)

#define tuner_err(fmt, arg...) do {					\
	printk(KERN_ERR "%s %d-%04x: " fmt, PREFIX,			\
	       i2c_adapter_id(t->i2c->adapter),				\
	       t->i2c->addr, ##arg);					\
	 } while (0)

#define tuner_dbg(fmt, arg...) do {					\
	if (tuner_debug)						\
		printk(KERN_DEBUG "%s %d-%04x: " fmt, PREFIX,		\
		       i2c_adapter_id(t->i2c->adapter),			\
		       t->i2c->addr, ##arg);				\
	} while (0)
111
/*
 * Internal struct used inside the driver
 */

struct tuner {
	/* device */
	struct dvb_frontend fe;		/* DVB frontend; also carries the analog demod ops */
	struct i2c_client *i2c;		/* i2c client this tuner is bound to */
	struct v4l2_subdev sd;		/* v4l2 subdevice registration */
	struct list_head list;		/* link in the global tuner_list */

	/* keep track of the current settings */
	v4l2_std_id std;		/* current TV standard */
	unsigned int tv_freq;		/* last TV freq, in units of 62.5 kHz (1/16 MHz) */
	unsigned int radio_freq;	/* last radio freq, in units of 62.5 Hz (1/16 kHz) */
	unsigned int audmode;		/* V4L2_TUNER_MODE_* audio mode */

	enum v4l2_tuner_type mode;	/* current mode (radio or TV) */
	unsigned int mode_mask; /* Combination of allowable modes */

	bool standby;	/* Standby mode */

	unsigned int type;	/* chip type id */
	unsigned int config;	/* per-chip config value (0-255), e.g. LNA setup */
	const char *name;	/* human-readable tuner name, for log messages */
};

/*
 * Function prototypes
 */

/* Defined later; forward-declared because set_type() may tune right away */
static void set_tv_freq(struct i2c_client *c, unsigned int freq);
static void set_radio_freq(struct i2c_client *c, unsigned int freq);
145
/*
 * tuner attach/detach logic
 */

/* This macro allows us to probe dynamically, avoiding static links */
#ifdef CONFIG_MEDIA_ATTACH
#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
	int __r = -EINVAL; \
	typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
	if (__a) { \
		__r = (int) __a(ARGS); \
		symbol_put(FUNCTION); \
	} else { \
		printk(KERN_ERR "TUNER: Unable to find " \
		       "symbol "#FUNCTION"()\n"); \
	} \
	__r; \
})

/* Release the digital and analog tuner ops and drop their module refs */
static void tuner_detach(struct dvb_frontend *fe)
{
	if (fe->ops.tuner_ops.release) {
		fe->ops.tuner_ops.release(fe);
		symbol_put_addr(fe->ops.tuner_ops.release);
	}
	if (fe->ops.analog_ops.release) {
		fe->ops.analog_ops.release(fe);
		symbol_put_addr(fe->ops.analog_ops.release);
	}
}
#else
/* Without CONFIG_MEDIA_ATTACH, just call the probe function directly */
#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
	FUNCTION(ARGS); \
})

/* Release the digital and analog tuner ops (statically linked variant) */
static void tuner_detach(struct dvb_frontend *fe)
{
	if (fe->ops.tuner_ops.release)
		fe->ops.tuner_ops.release(fe);
	if (fe->ops.analog_ops.release)
		fe->ops.analog_ops.release(fe);
}
#endif


/* Map a v4l2_subdev back to its enclosing struct tuner */
static inline struct tuner *to_tuner(struct v4l2_subdev *sd)
{
	return container_of(sd, struct tuner, sd);
}
195
196/*
197 * struct analog_demod_ops callbacks
198 */
199
200static void fe_set_params(struct dvb_frontend *fe,
201 struct analog_parameters *params)
202{
203 struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
204 struct tuner *t = fe->analog_demod_priv;
205
206 if (NULL == fe_tuner_ops->set_analog_params) {
207 tuner_warn("Tuner frontend module has no way to set freq\n");
208 return;
209 }
210 fe_tuner_ops->set_analog_params(fe, params);
211}
212
213static void fe_standby(struct dvb_frontend *fe)
214{
215 struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
216
217 if (fe_tuner_ops->sleep)
218 fe_tuner_ops->sleep(fe);
219}
220
221static int fe_has_signal(struct dvb_frontend *fe)
222{
223 u16 strength = 0;
224
225 if (fe->ops.tuner_ops.get_rf_strength)
226 fe->ops.tuner_ops.get_rf_strength(fe, &strength);
227
228 return strength;
229}
230
231static int fe_get_afc(struct dvb_frontend *fe)
232{
233 s32 afc = 0;
234
235 if (fe->ops.tuner_ops.get_afc)
236 fe->ops.tuner_ops.get_afc(fe, &afc);
237
238 return 0;
239}
240
241static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
242{
243 struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
244 struct tuner *t = fe->analog_demod_priv;
245
246 if (fe_tuner_ops->set_config)
247 return fe_tuner_ops->set_config(fe, priv_cfg);
248
249 tuner_warn("Tuner frontend module has no way to set config\n");
250
251 return 0;
252}
253
/* Forward declaration: implemented near the end of this file */
static void tuner_status(struct dvb_frontend *fe);

/*
 * Analog demod operations installed into the frontend by set_type() when
 * the attached tuner module provides set_analog_params but no analog
 * demod ops of its own.
 */
static struct analog_demod_ops tuner_analog_ops = {
	.set_params = fe_set_params,
	.standby = fe_standby,
	.has_signal = fe_has_signal,
	.get_afc = fe_get_afc,
	.set_config = fe_set_config,
	.tuner_status = tuner_status
};
264
265/*
266 * Functions to select between radio and TV and tuner probe/remove functions
267 */
268
/**
 * set_type - Sets the tuner type for a given device
 *
 * @c:			i2c_client descriptor
 * @type:		type of the tuner (e. g. tuner number)
 * @new_mode_mask:	Indicates if tuner supports TV and/or Radio
 * @new_config:		an optional parameter ranging from 0-255 used by
 *			a few tuners to adjust an internal parameter,
 *			like LNA mode
 * @tuner_callback:	an optional function to be called when switching
 *			to analog mode
 *
 * This function applies the tuner config to the tuner specified
 * by the tun_setup structure. It contains several per-tuner initialization "magic"
 */
static void set_type(struct i2c_client *c, unsigned int type,
		     unsigned int new_mode_mask, unsigned int new_config,
		     int (*tuner_callback) (void *dev, int component, int cmd, int arg))
{
	struct tuner *t = to_tuner(i2c_get_clientdata(c));
	struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
	unsigned char buffer[4];
	/* Most tuners can be tuned immediately; chips needing firmware
	   download or calibration clear this below */
	int tune_now = 1;

	if (type == UNSET || type == TUNER_ABSENT) {
		tuner_dbg("tuner 0x%02x: Tuner type absent\n", c->addr);
		return;
	}

	t->type = type;
	/* prevent invalid config values */
	t->config = new_config < 256 ? new_config : 0;
	if (tuner_callback != NULL) {
		tuner_dbg("defining GPIO callback\n");
		t->fe.callback = tuner_callback;
	}

	/* discard private data, in case set_type() was previously called */
	tuner_detach(&t->fe);
	t->fe.analog_demod_priv = NULL;

	switch (t->type) {
	case TUNER_MT2032:
		if (!dvb_attach(microtune_attach,
				&t->fe, t->i2c->adapter, t->i2c->addr))
			goto attach_failed;
		break;
	case TUNER_PHILIPS_TDA8290:
	{
		struct tda829x_config cfg = {
			.lna_cfg = t->config,
		};
		if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter,
				t->i2c->addr, &cfg))
			goto attach_failed;
		break;
	}
	case TUNER_TEA5767:
		if (!dvb_attach(tea5767_attach, &t->fe,
				t->i2c->adapter, t->i2c->addr))
			goto attach_failed;
		t->mode_mask = T_RADIO;	/* radio-only chip */
		break;
	case TUNER_TEA5761:
		if (!dvb_attach(tea5761_attach, &t->fe,
				t->i2c->adapter, t->i2c->addr))
			goto attach_failed;
		t->mode_mask = T_RADIO;	/* radio-only chip */
		break;
	case TUNER_PHILIPS_FMD1216ME_MK3:
	case TUNER_PHILIPS_FMD1216MEX_MK3:
		/* NOTE(review): magic i2c init sequence sent before attach;
		   meaning of the bytes is not documented here */
		buffer[0] = 0x0b;
		buffer[1] = 0xdc;
		buffer[2] = 0x9c;
		buffer[3] = 0x60;
		i2c_master_send(c, buffer, 4);
		mdelay(1);
		buffer[2] = 0x86;
		buffer[3] = 0x54;
		i2c_master_send(c, buffer, 4);
		if (!dvb_attach(simple_tuner_attach, &t->fe,
				t->i2c->adapter, t->i2c->addr, t->type))
			goto attach_failed;
		break;
	case TUNER_PHILIPS_TD1316:
		buffer[0] = 0x0b;
		buffer[1] = 0xdc;
		buffer[2] = 0x86;
		buffer[3] = 0xa4;
		i2c_master_send(c, buffer, 4);
		if (!dvb_attach(simple_tuner_attach, &t->fe,
				t->i2c->adapter, t->i2c->addr, t->type))
			goto attach_failed;
		break;
	case TUNER_XC2028:
	{
		struct xc2028_config cfg = {
			.i2c_adap = t->i2c->adapter,
			.i2c_addr = t->i2c->addr,
		};
		if (!dvb_attach(xc2028_attach, &t->fe, &cfg))
			goto attach_failed;
		tune_now = 0;
		break;
	}
	case TUNER_TDA9887:
		if (!dvb_attach(tda9887_attach,
				&t->fe, t->i2c->adapter, t->i2c->addr))
			goto attach_failed;
		break;
	case TUNER_XC5000:
	{
		struct xc5000_config xc5000_cfg = {
			.i2c_address = t->i2c->addr,
			/* if_khz will be set at dvb_attach() */
			.if_khz = 0,
		};

		if (!dvb_attach(xc5000_attach,
				&t->fe, t->i2c->adapter, &xc5000_cfg))
			goto attach_failed;
		tune_now = 0;
		break;
	}
	case TUNER_XC5000C:
	{
		struct xc5000_config xc5000c_cfg = {
			.i2c_address = t->i2c->addr,
			/* if_khz will be set at dvb_attach() */
			.if_khz = 0,
			.chip_id = XC5000C,
		};

		if (!dvb_attach(xc5000_attach,
				&t->fe, t->i2c->adapter, &xc5000c_cfg))
			goto attach_failed;
		tune_now = 0;
		break;
	}
	case TUNER_NXP_TDA18271:
	{
		struct tda18271_config cfg = {
			.config = t->config,
			.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
		};

		if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr,
				t->i2c->adapter, &cfg))
			goto attach_failed;
		tune_now = 0;
		break;
	}
	case TUNER_XC4000:
	{
		struct xc4000_config xc4000_cfg = {
			.i2c_address = t->i2c->addr,
			/* FIXME: the correct parameters will be set */
			/* only when the digital dvb_attach() occurs */
			.default_pm = 0,
			.dvb_amplitude = 0,
			.set_smoothedcvbs = 0,
			.if_khz = 0
		};
		if (!dvb_attach(xc4000_attach,
				&t->fe, t->i2c->adapter, &xc4000_cfg))
			goto attach_failed;
		tune_now = 0;
		break;
	}
	default:
		if (!dvb_attach(simple_tuner_attach, &t->fe,
				t->i2c->adapter, t->i2c->addr, t->type))
			goto attach_failed;

		break;
	}

	/* If the attached module only provides digital tuner ops, install
	   our fe_* wrappers as the analog demod ops (see tuner_analog_ops) */
	if ((NULL == analog_ops->set_params) &&
	    (fe_tuner_ops->set_analog_params)) {

		t->name = fe_tuner_ops->info.name;

		t->fe.analog_demod_priv = t;
		memcpy(analog_ops, &tuner_analog_ops,
		       sizeof(struct analog_demod_ops));

	} else {
		t->name = analog_ops->info.name;
	}

	tuner_dbg("type set to %s\n", t->name);

	t->mode_mask = new_mode_mask;

	/* Some tuners require more initialization setup before use,
	   such as firmware download or device calibration.
	   trying to set a frequency here will just fail
	   FIXME: better to move set_freq to the tuner code. This is needed
	   on analog tuners for PLL to properly work
	 */
	if (tune_now) {
		if (V4L2_TUNER_RADIO == t->mode)
			set_radio_freq(c, t->radio_freq);
		else
			set_tv_freq(c, t->tv_freq);
	}

	tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
		  c->adapter->name, c->driver->driver.name, c->addr << 1, type,
		  t->mode_mask);
	return;

attach_failed:
	tuner_dbg("Tuner attach for type = %d failed.\n", t->type);
	t->type = TUNER_ABSENT;

	return;
}
488
/**
 * tuner_s_type_addr - Sets the tuner type for a device
 *
 * @sd: subdev descriptor
 * @tun_setup: type to be associated to a given tuner i2c address
 *
 * This function applies the tuner config to the tuner specified
 * by the tun_setup structure.
 * If tuner I2C address is UNSET, then it will only set the device
 * if the tuner supports the mode specified in the call.
 * If the address is specified, the change will be applied only if
 * tuner I2C address matches.
 * The call can change the tuner number and the tuner mode.
 */
static int tuner_s_type_addr(struct v4l2_subdev *sd,
			     struct tuner_setup *tun_setup)
{
	struct tuner *t = to_tuner(sd);
	struct i2c_client *c = v4l2_get_subdevdata(sd);

	tuner_dbg("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=0x%02x\n",
		  tun_setup->type,
		  tun_setup->addr,
		  tun_setup->mode_mask,
		  tun_setup->config);

	/* Apply when either (a) the type was never set and the address is a
	   wildcard matching one of our supported modes, or (b) the address
	   is exactly ours */
	if ((t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) &&
	    (t->mode_mask & tun_setup->mode_mask))) ||
	    (tun_setup->addr == c->addr)) {
		set_type(c, tun_setup->type, tun_setup->mode_mask,
			 tun_setup->config, tun_setup->tuner_callback);
	} else
		tuner_dbg("set addr discarded for type %i, mask %x. "
			  "Asked to change tuner at addr 0x%02x, with mask %x\n",
			  t->type, t->mode_mask,
			  tun_setup->addr, tun_setup->mode_mask);

	return 0;
}
528
529/**
530 * tuner_s_config - Sets tuner configuration
531 *
532 * @sd: subdev descriptor
533 * @cfg: tuner configuration
534 *
535 * Calls tuner set_config() private function to set some tuner-internal
536 * parameters
537 */
538static int tuner_s_config(struct v4l2_subdev *sd,
539 const struct v4l2_priv_tun_config *cfg)
540{
541 struct tuner *t = to_tuner(sd);
542 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
543
544 if (t->type != cfg->tuner)
545 return 0;
546
547 if (analog_ops->set_config) {
548 analog_ops->set_config(&t->fe, cfg->priv);
549 return 0;
550 }
551
552 tuner_dbg("Tuner frontend module has no way to set config\n");
553 return 0;
554}
555
556/**
557 * tuner_lookup - Seek for tuner adapters
558 *
559 * @adap: i2c_adapter struct
560 * @radio: pointer to be filled if the adapter is radio
561 * @tv: pointer to be filled if the adapter is TV
562 *
563 * Search for existing radio and/or TV tuners on the given I2C adapter,
564 * discarding demod-only adapters (tda9887).
565 *
566 * Note that when this function is called from tuner_probe you can be
567 * certain no other devices will be added/deleted at the same time, I2C
568 * core protects against that.
569 */
570static void tuner_lookup(struct i2c_adapter *adap,
571 struct tuner **radio, struct tuner **tv)
572{
573 struct tuner *pos;
574
575 *radio = NULL;
576 *tv = NULL;
577
578 list_for_each_entry(pos, &tuner_list, list) {
579 int mode_mask;
580
581 if (pos->i2c->adapter != adap ||
582 strcmp(pos->i2c->driver->driver.name, "tuner"))
583 continue;
584
585 mode_mask = pos->mode_mask;
586 if (*radio == NULL && mode_mask == T_RADIO)
587 *radio = pos;
588 /* Note: currently TDA9887 is the only demod-only
589 device. If other devices appear then we need to
590 make this test more general. */
591 else if (*tv == NULL && pos->type != TUNER_TDA9887 &&
592 (pos->mode_mask & T_ANALOG_TV))
593 *tv = pos;
594 }
595}
596
/**
 * tuner_probe - Probes the existing tuners on an I2C bus
 *
 * @client: i2c_client descriptor
 * @id: not used
 *
 * This routine probes for tuners at the expected I2C addresses. On most
 * cases, if a device answers to a given I2C address, it assumes that the
 * device is a tuner. On a few cases, however, an additional logic is needed
 * to double check if the device is really a tuner, or to identify the tuner
 * type, like on tea5767/5761 devices.
 *
 * During client attach, set_type is called by adapter's attach_inform callback.
 * set_type must then be completed by tuner_probe.
 */
static int tuner_probe(struct i2c_client *client,
		       const struct i2c_device_id *id)
{
	struct tuner *t;
	struct tuner *radio;
	struct tuner *tv;

	t = kzalloc(sizeof(struct tuner), GFP_KERNEL);
	if (NULL == t)
		return -ENOMEM;
	v4l2_i2c_subdev_init(&t->sd, client, &tuner_ops);
	t->i2c = client;
	t->name = "(tuner unset)";
	t->type = UNSET;
	t->audmode = V4L2_TUNER_MODE_STEREO;
	t->standby = 1;
	t->radio_freq = 87.5 * 16000;	/* Initial freq range */
	t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLL's to properly start */

	/* Optional debug aid: dump a raw read of the chip's registers */
	if (show_i2c) {
		unsigned char buffer[16];
		int i, rc;

		memset(buffer, 0, sizeof(buffer));
		rc = i2c_master_recv(client, buffer, sizeof(buffer));
		tuner_info("I2C RECV = ");
		for (i = 0; i < rc; i++)
			printk(KERN_CONT "%02x ", buffer[i]);
		printk("\n");
	}

	/* autodetection code based on the i2c addr */
	if (!no_autodetect) {
		switch (client->addr) {
		case 0x10:
			if (tuner_symbol_probe(tea5761_autodetection,
					       t->i2c->adapter,
					       t->i2c->addr) >= 0) {
				t->type = TUNER_TEA5761;
				t->mode_mask = T_RADIO;
				/* tea5761 is radio-only: take the radio role
				   away from any TV tuner on this adapter */
				tuner_lookup(t->i2c->adapter, &radio, &tv);
				if (tv)
					tv->mode_mask &= ~T_RADIO;

				goto register_client;
			}
			kfree(t);
			return -ENODEV;
		case 0x42:
		case 0x43:
		case 0x4a:
		case 0x4b:
			/* If chip is not tda8290, don't register.
			   since it can be tda9887*/
			if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
					       t->i2c->addr) >= 0) {
				tuner_dbg("tda829x detected\n");
			} else {
				/* Default is being tda9887 */
				t->type = TUNER_TDA9887;
				t->mode_mask = T_RADIO | T_ANALOG_TV;
				goto register_client;
			}
			break;
		case 0x60:
			if (tuner_symbol_probe(tea5767_autodetection,
					       t->i2c->adapter, t->i2c->addr)
					>= 0) {
				t->type = TUNER_TEA5767;
				t->mode_mask = T_RADIO;
				/* Sets freq to FM range */
				tuner_lookup(t->i2c->adapter, &radio, &tv);
				if (tv)
					tv->mode_mask &= ~T_RADIO;

				goto register_client;
			}
			break;
		}
	}

	/* Initializes only the first TV tuner on this adapter. Why only the
	   first? Because there are some devices (notably the ones with TI
	   tuners) that have more than one i2c address for the *same* device.
	   Experience shows that, except for just one case, the first
	   address is the right one. The exception is a Russian tuner
	   (ACORP_Y878F). So, the desired behavior is just to enable the
	   first found TV tuner. */
	tuner_lookup(t->i2c->adapter, &radio, &tv);
	if (tv == NULL) {
		t->mode_mask = T_ANALOG_TV;
		if (radio == NULL)
			t->mode_mask |= T_RADIO;
		tuner_dbg("Setting mode_mask to 0x%02x\n", t->mode_mask);
	}

	/* Should be just before return */
register_client:
	/* Sets a default mode */
	if (t->mode_mask & T_ANALOG_TV)
		t->mode = V4L2_TUNER_ANALOG_TV;
	else
		t->mode = V4L2_TUNER_RADIO;
	set_type(client, t->type, t->mode_mask, t->config, t->fe.callback);
	list_add_tail(&t->list, &tuner_list);

	tuner_info("Tuner %d found with type(s)%s%s.\n",
		   t->type,
		   t->mode_mask & T_RADIO ? " Radio" : "",
		   t->mode_mask & T_ANALOG_TV ? " TV" : "");
	return 0;
}
724
725/**
726 * tuner_remove - detaches a tuner
727 *
728 * @client: i2c_client descriptor
729 */
730
731static int tuner_remove(struct i2c_client *client)
732{
733 struct tuner *t = to_tuner(i2c_get_clientdata(client));
734
735 v4l2_device_unregister_subdev(&t->sd);
736 tuner_detach(&t->fe);
737 t->fe.analog_demod_priv = NULL;
738
739 list_del(&t->list);
740 kfree(t);
741 return 0;
742}
743
744/*
745 * Functions to switch between Radio and TV
746 *
747 * A few cards have a separate I2C tuner for radio. Those routines
748 * take care of switching between TV/Radio mode, filtering only the
749 * commands that apply to the Radio or TV tuner.
750 */
751
752/**
753 * check_mode - Verify if tuner supports the requested mode
754 * @t: a pointer to the module's internal struct_tuner
755 *
756 * This function checks if the tuner is capable of tuning analog TV,
757 * digital TV or radio, depending on what the caller wants. If the
758 * tuner can't support that mode, it returns -EINVAL. Otherwise, it
759 * returns 0.
760 * This function is needed for boards that have a separate tuner for
761 * radio (like devices with tea5767).
762 * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
763 * select a TV frequency. So, t_mode = T_ANALOG_TV could actually
764 * be used to represent a Digital TV too.
765 */
766static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
767{
768 int t_mode;
769 if (mode == V4L2_TUNER_RADIO)
770 t_mode = T_RADIO;
771 else
772 t_mode = T_ANALOG_TV;
773
774 if ((t_mode & t->mode_mask) == 0)
775 return -EINVAL;
776
777 return 0;
778}
779
780/**
781 * set_mode - Switch tuner to other mode.
782 * @t: a pointer to the module's internal struct_tuner
783 * @mode: enum v4l2_type (radio or TV)
784 *
785 * If tuner doesn't support the needed mode (radio or TV), prints a
786 * debug message and returns -EINVAL, changing its state to standby.
787 * Otherwise, changes the mode and returns 0.
788 */
789static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
790{
791 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
792
793 if (mode != t->mode) {
794 if (check_mode(t, mode) == -EINVAL) {
795 tuner_dbg("Tuner doesn't support mode %d. "
796 "Putting tuner to sleep\n", mode);
797 t->standby = true;
798 if (analog_ops->standby)
799 analog_ops->standby(&t->fe);
800 return -EINVAL;
801 }
802 t->mode = mode;
803 tuner_dbg("Changing to mode %d\n", mode);
804 }
805 return 0;
806}
807
808/**
809 * set_freq - Set the tuner to the desired frequency.
810 * @t: a pointer to the module's internal struct_tuner
811 * @freq: frequency to set (0 means to use the current frequency)
812 */
813static void set_freq(struct tuner *t, unsigned int freq)
814{
815 struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
816
817 if (t->mode == V4L2_TUNER_RADIO) {
818 if (!freq)
819 freq = t->radio_freq;
820 set_radio_freq(client, freq);
821 } else {
822 if (!freq)
823 freq = t->tv_freq;
824 set_tv_freq(client, freq);
825 }
826}
827
828/*
829 * Functions that are specific for TV mode
830 */
831
/**
 * set_tv_freq - Set tuner frequency, freq in Units of 62.5 kHz = 1/16MHz
 *
 * @c: i2c_client descriptor
 * @freq: frequency
 */
static void set_tv_freq(struct i2c_client *c, unsigned int freq)
{
	struct tuner *t = to_tuner(i2c_get_clientdata(c));
	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;

	struct analog_parameters params = {
		.mode = t->mode,
		.audmode = t->audmode,
		.std = t->std
	};

	if (t->type == UNSET) {
		tuner_warn("tuner type not set\n");
		return;
	}
	if (NULL == analog_ops->set_params) {
		tuner_warn("Tuner has no way to set tv freq\n");
		return;
	}
	/* tv_range[] is in MHz; freq is in 1/16 MHz units */
	if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) {
		tuner_dbg("TV freq (%d.%02d) out of range (%d-%d)\n",
			  freq / 16, freq % 16 * 100 / 16, tv_range[0],
			  tv_range[1]);
		/* V4L2 spec: if the freq is not possible then the closest
		   possible value should be selected */
		if (freq < tv_range[0] * 16)
			freq = tv_range[0] * 16;
		else
			freq = tv_range[1] * 16;
	}
	params.frequency = freq;
	tuner_dbg("tv freq set to %d.%02d\n",
		  freq / 16, freq % 16 * 100 / 16);
	t->tv_freq = freq;
	t->standby = false;	/* tuning wakes the tuner up */

	analog_ops->set_params(&t->fe, &params);
}
876
877/**
878 * tuner_fixup_std - force a given video standard variant
879 *
880 * @t: tuner internal struct
881 * @std: TV standard
882 *
883 * A few devices or drivers have problem to detect some standard variations.
884 * On other operational systems, the drivers generally have a per-country
885 * code, and some logic to apply per-country hacks. V4L2 API doesn't provide
886 * such hacks. Instead, it relies on a proper video standard selection from
887 * the userspace application. However, as some apps are buggy, not allowing
888 * to distinguish all video standard variations, a modprobe parameter can
889 * be used to force a video standard match.
890 */
891static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
892{
893 if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) {
894 switch (pal[0]) {
895 case '6':
896 return V4L2_STD_PAL_60;
897 case 'b':
898 case 'B':
899 case 'g':
900 case 'G':
901 return V4L2_STD_PAL_BG;
902 case 'i':
903 case 'I':
904 return V4L2_STD_PAL_I;
905 case 'd':
906 case 'D':
907 case 'k':
908 case 'K':
909 return V4L2_STD_PAL_DK;
910 case 'M':
911 case 'm':
912 return V4L2_STD_PAL_M;
913 case 'N':
914 case 'n':
915 if (pal[1] == 'c' || pal[1] == 'C')
916 return V4L2_STD_PAL_Nc;
917 return V4L2_STD_PAL_N;
918 default:
919 tuner_warn("pal= argument not recognised\n");
920 break;
921 }
922 }
923 if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
924 switch (secam[0]) {
925 case 'b':
926 case 'B':
927 case 'g':
928 case 'G':
929 case 'h':
930 case 'H':
931 return V4L2_STD_SECAM_B |
932 V4L2_STD_SECAM_G |
933 V4L2_STD_SECAM_H;
934 case 'd':
935 case 'D':
936 case 'k':
937 case 'K':
938 return V4L2_STD_SECAM_DK;
939 case 'l':
940 case 'L':
941 if ((secam[1] == 'C') || (secam[1] == 'c'))
942 return V4L2_STD_SECAM_LC;
943 return V4L2_STD_SECAM_L;
944 default:
945 tuner_warn("secam= argument not recognised\n");
946 break;
947 }
948 }
949
950 if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
951 switch (ntsc[0]) {
952 case 'm':
953 case 'M':
954 return V4L2_STD_NTSC_M;
955 case 'j':
956 case 'J':
957 return V4L2_STD_NTSC_M_JP;
958 case 'k':
959 case 'K':
960 return V4L2_STD_NTSC_M_KR;
961 default:
962 tuner_info("ntsc= argument not recognised\n");
963 break;
964 }
965 }
966 return std;
967}
968
969/*
970 * Functions that are specific for Radio mode
971 */
972
/**
 * set_radio_freq - Set tuner frequency, freq in Units of 62.5 Hz = 1/16kHz
 *
 * @c: i2c_client descriptor
 * @freq: frequency
 */
static void set_radio_freq(struct i2c_client *c, unsigned int freq)
{
	struct tuner *t = to_tuner(i2c_get_clientdata(c));
	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;

	struct analog_parameters params = {
		.mode = t->mode,
		.audmode = t->audmode,
		.std = t->std
	};

	if (t->type == UNSET) {
		tuner_warn("tuner type not set\n");
		return;
	}
	if (NULL == analog_ops->set_params) {
		tuner_warn("tuner has no way to set radio frequency\n");
		return;
	}
	/* radio_range[] is in MHz; freq is in 1/16 kHz units */
	if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) {
		tuner_dbg("radio freq (%d.%02d) out of range (%d-%d)\n",
			  freq / 16000, freq % 16000 * 100 / 16000,
			  radio_range[0], radio_range[1]);
		/* V4L2 spec: if the freq is not possible then the closest
		   possible value should be selected */
		if (freq < radio_range[0] * 16000)
			freq = radio_range[0] * 16000;
		else
			freq = radio_range[1] * 16000;
	}
	params.frequency = freq;
	tuner_dbg("radio freq set to %d.%02d\n",
		  freq / 16000, freq % 16000 * 100 / 16000);
	t->radio_freq = freq;
	t->standby = false;	/* tuning wakes the tuner up */

	analog_ops->set_params(&t->fe, &params);
}
1017
1018/*
1019 * Debug function for reporting tuner status to userspace
1020 */
1021
/**
 * tuner_status - Dumps the current tuner status at dmesg
 * @fe: pointer to struct dvb_frontend
 *
 * This callback is used only for driver debug purposes, answering to
 * VIDIOC_LOG_STATUS. No changes should happen on this call.
 */
static void tuner_status(struct dvb_frontend *fe)
{
	struct tuner *t = fe->analog_demod_priv;
	unsigned long freq, freq_fraction;
	struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
	struct analog_demod_ops *analog_ops = &fe->ops.analog_ops;
	const char *p;

	switch (t->mode) {
	case V4L2_TUNER_RADIO:
		p = "radio";
		break;
	case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
		p = "digital TV";
		break;
	case V4L2_TUNER_ANALOG_TV:
	default:
		p = "analog TV";
		break;
	}
	/* Radio frequencies are stored in 1/16 kHz units (see
	 * set_radio_freq); TV frequencies use a divisor of 16 — presumably
	 * 1/16 MHz units, matching the "MHz" label printed below. */
	if (t->mode == V4L2_TUNER_RADIO) {
		freq = t->radio_freq / 16000;
		freq_fraction = (t->radio_freq % 16000) * 100 / 16000;
	} else {
		freq = t->tv_freq / 16;
		freq_fraction = (t->tv_freq % 16) * 100 / 16;
	}
	tuner_info("Tuner mode: %s%s\n", p,
		   t->standby ? " on standby mode" : "");
	tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
	tuner_info("Standard: 0x%08lx\n", (unsigned long)t->std);
	/* Lock/stereo/signal details below are reported only in radio mode */
	if (t->mode != V4L2_TUNER_RADIO)
		return;
	if (fe_tuner_ops->get_status) {
		u32 tuner_status;	/* status bits; shadows this function's name */

		fe_tuner_ops->get_status(&t->fe, &tuner_status);
		if (tuner_status & TUNER_STATUS_LOCKED)
			tuner_info("Tuner is locked.\n");
		if (tuner_status & TUNER_STATUS_STEREO)
			tuner_info("Stereo: yes\n");
	}
	if (analog_ops->has_signal)
		tuner_info("Signal strength: %d\n",
			   analog_ops->has_signal(fe));
}
1075
/*
 * Function to explicitly change mode to radio. Probably not needed anymore
 */
1079
1080static int tuner_s_radio(struct v4l2_subdev *sd)
1081{
1082 struct tuner *t = to_tuner(sd);
1083
1084 if (set_mode(t, V4L2_TUNER_RADIO) == 0)
1085 set_freq(t, 0);
1086 return 0;
1087}
1088
1089/*
1090 * Tuner callbacks to handle userspace ioctl's
1091 */
1092
1093/**
1094 * tuner_s_power - controls the power state of the tuner
1095 * @sd: pointer to struct v4l2_subdev
1096 * @on: a zero value puts the tuner to sleep, non-zero wakes it up
1097 */
1098static int tuner_s_power(struct v4l2_subdev *sd, int on)
1099{
1100 struct tuner *t = to_tuner(sd);
1101 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
1102
1103 if (on) {
1104 if (t->standby && set_mode(t, t->mode) == 0) {
1105 tuner_dbg("Waking up tuner\n");
1106 set_freq(t, 0);
1107 }
1108 return 0;
1109 }
1110
1111 tuner_dbg("Putting tuner to sleep\n");
1112 t->standby = true;
1113 if (analog_ops->standby)
1114 analog_ops->standby(&t->fe);
1115 return 0;
1116}
1117
1118static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
1119{
1120 struct tuner *t = to_tuner(sd);
1121
1122 if (set_mode(t, V4L2_TUNER_ANALOG_TV))
1123 return 0;
1124
1125 t->std = tuner_fixup_std(t, std);
1126 if (t->std != std)
1127 tuner_dbg("Fixup standard %llx to %llx\n", std, t->std);
1128 set_freq(t, 0);
1129 return 0;
1130}
1131
1132static int tuner_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
1133{
1134 struct tuner *t = to_tuner(sd);
1135
1136 if (set_mode(t, f->type) == 0)
1137 set_freq(t, f->frequency);
1138 return 0;
1139}
1140
/**
 * tuner_g_frequency - Get the tuned frequency for the tuner
 * @sd: pointer to struct v4l2_subdev
 * @f: pointer to struct v4l2_frequency
 *
 * At return, the structure f will be filled with tuner frequency
 * if the tuner matches the f->type.
 * Note: f->type should be initialized before calling it.
 * This is done by either video_ioctl2 or by the bridge driver.
 */
static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
	struct tuner *t = to_tuner(sd);
	struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;

	if (check_mode(t, f->type) == -EINVAL)
		return 0;
	if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) {
		/* Ask the hardware; abs_freq presumably comes back in Hz
		 * (TODO confirm against dvb_tuner_ops docs) */
		u32 abs_freq;

		fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
		/* Hz -> 62.5 Hz units (×2/125) for radio,
		 * Hz -> 62500 Hz (1/16 MHz) units for TV */
		f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
			DIV_ROUND_CLOSEST(abs_freq * 2, 125) :
			DIV_ROUND_CLOSEST(abs_freq, 62500);
	} else {
		/* Fall back to the last frequency we programmed */
		f->frequency = (V4L2_TUNER_RADIO == f->type) ?
			t->radio_freq : t->tv_freq;
	}
	return 0;
}
1171
/**
 * tuner_g_tuner - Fill in tuner information
 * @sd: pointer to struct v4l2_subdev
 * @vt: pointer to struct v4l2_tuner
 *
 * At return, the structure vt will be filled with tuner information
 * if the tuner matches vt->type.
 * Note: vt->type should be initialized before calling it.
 * This is done by either video_ioctl2 or by the bridge driver.
 */
static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
	struct tuner *t = to_tuner(sd);
	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
	struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;

	if (check_mode(t, vt->type) == -EINVAL)
		return 0;
	if (vt->type == t->mode && analog_ops->get_afc)
		vt->afc = analog_ops->get_afc(&t->fe);
	if (analog_ops->has_signal)
		vt->signal = analog_ops->has_signal(&t->fe);
	if (vt->type != V4L2_TUNER_RADIO) {
		/* TV mode: range is in 1/16 MHz units (tv_range is in MHz) */
		vt->capability |= V4L2_TUNER_CAP_NORM;
		vt->rangelow = tv_range[0] * 16;
		vt->rangehigh = tv_range[1] * 16;
		return 0;
	}

	/* radio mode */
	if (vt->type == t->mode) {
		vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
		if (fe_tuner_ops->get_status) {
			u32 tuner_status;

			fe_tuner_ops->get_status(&t->fe, &tuner_status);
			/* Refine the subchannel report from hardware status */
			vt->rxsubchans =
				(tuner_status & TUNER_STATUS_STEREO) ?
				V4L2_TUNER_SUB_STEREO :
				V4L2_TUNER_SUB_MONO;
		}
		vt->audmode = t->audmode;
	}
	/* CAP_LOW: radio range is in 62.5 Hz units (radio_range is in MHz) */
	vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
	vt->rangelow = radio_range[0] * 16000;
	vt->rangehigh = radio_range[1] * 16000;

	return 0;
}
1221
1222/**
1223 * tuner_s_tuner - Set the tuner's audio mode
1224 * @sd: pointer to struct v4l2_subdev
1225 * @vt: pointer to struct v4l2_tuner
1226 *
1227 * Sets the audio mode if the tuner matches vt->type.
1228 * Note: vt->type should be initialized before calling it.
1229 * This is done by either video_ioctl2 or by the bridge driver.
1230 */
1231static int tuner_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1232{
1233 struct tuner *t = to_tuner(sd);
1234
1235 if (set_mode(t, vt->type))
1236 return 0;
1237
1238 if (t->mode == V4L2_TUNER_RADIO)
1239 t->audmode = vt->audmode;
1240 set_freq(t, 0);
1241
1242 return 0;
1243}
1244
1245static int tuner_log_status(struct v4l2_subdev *sd)
1246{
1247 struct tuner *t = to_tuner(sd);
1248 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
1249
1250 if (analog_ops->tuner_status)
1251 analog_ops->tuner_status(&t->fe);
1252 return 0;
1253}
1254
1255#ifdef CONFIG_PM_SLEEP
1256static int tuner_suspend(struct device *dev)
1257{
1258 struct i2c_client *c = to_i2c_client(dev);
1259 struct tuner *t = to_tuner(i2c_get_clientdata(c));
1260 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
1261
1262 tuner_dbg("suspend\n");
1263
1264 if (!t->standby && analog_ops->standby)
1265 analog_ops->standby(&t->fe);
1266
1267 return 0;
1268}
1269
1270static int tuner_resume(struct device *dev)
1271{
1272 struct i2c_client *c = to_i2c_client(dev);
1273 struct tuner *t = to_tuner(i2c_get_clientdata(c));
1274
1275 tuner_dbg("resume\n");
1276
1277 if (!t->standby)
1278 if (set_mode(t, t->mode) == 0)
1279 set_freq(t, 0);
1280
1281 return 0;
1282}
1283#endif
1284
1285static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)
1286{
1287 struct v4l2_subdev *sd = i2c_get_clientdata(client);
1288
1289 /* TUNER_SET_CONFIG is still called by tuner-simple.c, so we have
1290 to handle it here.
1291 There must be a better way of doing this... */
1292 switch (cmd) {
1293 case TUNER_SET_CONFIG:
1294 return tuner_s_config(sd, arg);
1295 }
1296 return -ENOIOCTLCMD;
1297}
1298
/*
 * Callback structs
 */

/* Core subdev operations: status dump, standard selection and power */
static const struct v4l2_subdev_core_ops tuner_core_ops = {
	.log_status = tuner_log_status,
	.s_std = tuner_s_std,
	.s_power = tuner_s_power,
};

/* Tuner-specific subdev operations wired to the handlers above */
static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = {
	.s_radio = tuner_s_radio,
	.g_tuner = tuner_g_tuner,
	.s_tuner = tuner_s_tuner,
	.s_frequency = tuner_s_frequency,
	.g_frequency = tuner_g_frequency,
	.s_type_addr = tuner_s_type_addr,
	.s_config = tuner_s_config,
};

/* Top-level v4l2_subdev ops table combining the two groups above */
static const struct v4l2_subdev_ops tuner_ops = {
	.core = &tuner_core_ops,
	.tuner = &tuner_tuner_ops,
};
1323
/*
 * I2C structs and module init functions
 */

/* Hook system sleep into tuner_suspend/tuner_resume (CONFIG_PM_SLEEP only) */
static const struct dev_pm_ops tuner_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tuner_suspend, tuner_resume)
};

static const struct i2c_device_id tuner_id[] = {
	{ "tuner", }, /* autodetect */
	{ }
};
MODULE_DEVICE_TABLE(i2c, tuner_id);

static struct i2c_driver tuner_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "tuner",
		.pm = &tuner_pm_ops,
	},
	.probe = tuner_probe,
	.remove = tuner_remove,
	.command = tuner_command,	/* legacy TUNER_SET_CONFIG path */
	.id_table = tuner_id,
};

/* Standard boilerplate: registers/unregisters tuner_driver on load/unload */
module_i2c_driver(tuner_driver);

MODULE_DESCRIPTION("device driver for various TV and TV+FM radio tuners");
MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
new file mode 100644
index 000000000000..105f88cdb9d6
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -0,0 +1,623 @@
1/*
2 * Video for Linux Two
3 *
4 * A generic video device interface for the LINUX operating system
5 * using a set of device structures/vectors for low level operations.
6 *
7 * This file replaces the videodev.c file that comes with the
8 * regular kernel distribution.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 * Author: Bill Dirks <bill@thedirks.org>
16 * based on code by Alan Cox, <alan@cymru.net>
17 *
18 */
19
20/*
21 * Video capture interface for Linux
22 *
23 * A generic video device interface for the LINUX operating system
24 * using a set of device structures/vectors for low level operations.
25 *
26 * This program is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU General Public License
28 * as published by the Free Software Foundation; either version
29 * 2 of the License, or (at your option) any later version.
30 *
31 * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
32 *
33 * Fixes:
34 */
35
36/*
37 * Video4linux 1/2 integration by Justin Schoeman
38 * <justin@suntiger.ee.up.ac.za>
39 * 2.4 PROCFS support ported from 2.4 kernels by
40 * Iñaki García Etxebarria <garetxe@euskalnet.net>
41 * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
42 * 2.4 devfs support ported from 2.4 kernels by
43 * Dan Merillat <dan@merillat.org>
44 * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
45 */
46
47#include <linux/module.h>
48#include <linux/types.h>
49#include <linux/kernel.h>
50#include <linux/mm.h>
51#include <linux/string.h>
52#include <linux/errno.h>
53#include <linux/i2c.h>
54#if defined(CONFIG_SPI)
55#include <linux/spi/spi.h>
56#endif
57#include <asm/uaccess.h>
58#include <asm/pgtable.h>
59#include <asm/io.h>
60#include <asm/div64.h>
61#include <media/v4l2-common.h>
62#include <media/v4l2-device.h>
63#include <media/v4l2-ctrls.h>
64#include <media/v4l2-chip-ident.h>
65
66#include <linux/videodev2.h>
67
68MODULE_AUTHOR("Bill Dirks, Justin Schoeman, Gerd Knorr");
69MODULE_DESCRIPTION("misc helper functions for v4l2 device drivers");
70MODULE_LICENSE("GPL");
71
72/*
73 *
74 * V 4 L 2 D R I V E R H E L P E R A P I
75 *
76 */
77
78/*
79 * Video Standard Operations (contributed by Michael Schimek)
80 */
81
/* Helper functions for control handling */

/* Check for correctness of the ctrl's value based on the data from
   struct v4l2_queryctrl and the available menu items. Note that
   menu_items may be NULL, in that case it is ignored.

   Returns 0 when valid, -EINVAL for disabled controls or bad menu
   entries, -EBUSY for grabbed controls and -ERANGE for out-of-range
   or out-of-mask values. */
int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
		const char * const *menu_items)
{
	if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED)
		return -EINVAL;
	if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED)
		return -EBUSY;
	/* These control types carry no integer value to range-check */
	if (qctrl->type == V4L2_CTRL_TYPE_STRING)
		return 0;
	if (qctrl->type == V4L2_CTRL_TYPE_BUTTON ||
	    qctrl->type == V4L2_CTRL_TYPE_INTEGER64 ||
	    qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
		return 0;
	if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum)
		return -ERANGE;
	if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) {
		/* Reject menu 'holes': NULL or empty entry names */
		if (menu_items[ctrl->value] == NULL ||
		    menu_items[ctrl->value][0] == '\0')
			return -EINVAL;
	}
	/* For bitmasks, 'maximum' is the set of allowed bits */
	if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
	    (ctrl->value & ~qctrl->maximum))
		return -ERANGE;
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_check);
113
/* Fill in a struct v4l2_queryctrl from the supplied range/default values.
   v4l2_ctrl_fill() (defined elsewhere) supplies the control's name, type
   and flags from qctrl->id, and may adjust min/max/step/def.
   Returns -EINVAL if the control ID is unknown (no name found). */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def)
{
	const char *name;

	v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
		       &min, &max, &step, &def, &qctrl->flags);

	if (name == NULL)
		return -EINVAL;

	qctrl->minimum = min;
	qctrl->maximum = max;
	qctrl->step = step;
	qctrl->default_value = def;
	qctrl->reserved[0] = qctrl->reserved[1] = 0;
	strlcpy(qctrl->name, name, sizeof(qctrl->name));
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
134
/* Fill in a struct v4l2_querymenu based on the struct v4l2_queryctrl and
   the menu. The qctrl pointer may be NULL, in which case it is ignored.
   If menu_items is NULL, then the menu items are retrieved using
   v4l2_ctrl_get_menu. */
int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qctrl,
	       const char * const *menu_items)
{
	int i;

	qmenu->reserved = 0;
	if (menu_items == NULL)
		menu_items = v4l2_ctrl_get_menu(qmenu->id);
	if (menu_items == NULL ||
	    (qctrl && (qmenu->index < qctrl->minimum || qmenu->index > qctrl->maximum)))
		return -EINVAL;
	/* Walk up to the requested index, stopping early at a NULL entry */
	for (i = 0; i < qmenu->index && menu_items[i]; i++) ;
	/* Reject 'holes': indexes pointing at NULL or empty names */
	if (menu_items[i] == NULL || menu_items[i][0] == '\0')
		return -EINVAL;
	strlcpy(qmenu->name, menu_items[qmenu->index], sizeof(qmenu->name));
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_menu);
157
158/* Fill in a struct v4l2_querymenu based on the specified array of valid
159 menu items (terminated by V4L2_CTRL_MENU_IDS_END).
160 Use this if there are 'holes' in the list of valid menu items. */
161int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids)
162{
163 const char * const *menu_items = v4l2_ctrl_get_menu(qmenu->id);
164
165 qmenu->reserved = 0;
166 if (menu_items == NULL || ids == NULL)
167 return -EINVAL;
168 while (*ids != V4L2_CTRL_MENU_IDS_END) {
169 if (*ids++ == qmenu->index) {
170 strlcpy(qmenu->name, menu_items[qmenu->index],
171 sizeof(qmenu->name));
172 return 0;
173 }
174 }
175 return -EINVAL;
176}
177EXPORT_SYMBOL(v4l2_ctrl_query_menu_valid_items);
178
/* ctrl_classes points to an array of u32 pointers, the last element is
   a NULL pointer. Each u32 array is a 0-terminated array of control IDs.
   Each array must be sorted low to high and belong to the same control
   class. The array of u32 pointers must also be sorted, from low class IDs
   to high class IDs.

   This function returns the first ID that follows after the given ID.
   When no more controls are available 0 is returned.

   With the V4L2_CTRL_FLAG_NEXT_CTRL flag set in 'id', this iterates;
   without it, it merely validates that 'id' exists in ctrl_classes. */
u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
{
	u32 ctrl_class = V4L2_CTRL_ID2CLASS(id);
	const u32 *pctrl;

	if (ctrl_classes == NULL)
		return 0;

	/* if no query is desired, then check if the ID is part of ctrl_classes */
	if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) {
		/* find class */
		while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class)
			ctrl_classes++;
		if (*ctrl_classes == NULL)
			return 0;
		pctrl = *ctrl_classes;
		/* find control ID */
		while (*pctrl && *pctrl != id) pctrl++;
		return *pctrl ? id : 0;
	}
	id &= V4L2_CTRL_ID_MASK;	/* strip the NEXT_CTRL flag */
	id++;	/* select next control */
	/* find first class that matches (or is greater than) the class of
	   the ID */
	while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) < ctrl_class)
		ctrl_classes++;
	/* no more classes */
	if (*ctrl_classes == NULL)
		return 0;
	pctrl = *ctrl_classes;
	/* find first ctrl within the class that is >= ID */
	while (*pctrl && *pctrl < id) pctrl++;
	if (*pctrl)
		return *pctrl;
	/* we are at the end of the controls of the current class. */
	/* continue with next class if available */
	ctrl_classes++;
	if (*ctrl_classes == NULL)
		return 0;
	return **ctrl_classes;
}
EXPORT_SYMBOL(v4l2_ctrl_next);
229
230int v4l2_chip_match_host(const struct v4l2_dbg_match *match)
231{
232 switch (match->type) {
233 case V4L2_CHIP_MATCH_HOST:
234 return match->addr == 0;
235 default:
236 return 0;
237 }
238}
239EXPORT_SYMBOL(v4l2_chip_match_host);
240
241#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
/* Return non-zero when the i2c client matches the debug match descriptor,
   either by driver name (V4L2_CHIP_MATCH_I2C_DRIVER) or by bus address
   (V4L2_CHIP_MATCH_I2C_ADDR). */
int v4l2_chip_match_i2c_client(struct i2c_client *c, const struct v4l2_dbg_match *match)
{
	int len;

	if (c == NULL || match == NULL)
		return 0;

	switch (match->type) {
	case V4L2_CHIP_MATCH_I2C_DRIVER:
		if (c->driver == NULL || c->driver->driver.name == NULL)
			return 0;
		len = strlen(c->driver->driver.name);
		/* legacy drivers have a ' suffix, don't try to match that */
		if (len && c->driver->driver.name[len - 1] == '\'')
			len--;
		/* prefix match over the (possibly shortened) driver name */
		return len && !strncmp(c->driver->driver.name, match->name, len);
	case V4L2_CHIP_MATCH_I2C_ADDR:
		return c->addr == match->addr;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(v4l2_chip_match_i2c_client);
265
266int v4l2_chip_ident_i2c_client(struct i2c_client *c, struct v4l2_dbg_chip_ident *chip,
267 u32 ident, u32 revision)
268{
269 if (!v4l2_chip_match_i2c_client(c, &chip->match))
270 return 0;
271 if (chip->ident == V4L2_IDENT_NONE) {
272 chip->ident = ident;
273 chip->revision = revision;
274 }
275 else {
276 chip->ident = V4L2_IDENT_AMBIGUOUS;
277 chip->revision = 0;
278 }
279 return 0;
280}
281EXPORT_SYMBOL(v4l2_chip_ident_i2c_client);
282
283/* ----------------------------------------------------------------- */
284
285/* I2C Helper functions */
286
287
/* Initialize a v4l2_subdev that is backed by an i2c client: link the two
   structures together and derive the subdev name from the driver name,
   adapter id and bus address. */
void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
		const struct v4l2_subdev_ops *ops)
{
	v4l2_subdev_init(sd, ops);
	sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
	/* the owner is the same as the i2c_client's driver owner */
	sd->owner = client->driver->driver.owner;
	/* i2c_client and v4l2_subdev point to one another */
	v4l2_set_subdevdata(sd, client);
	i2c_set_clientdata(client, sd);
	/* initialize name: "<driver> <adapter>-<addr>" */
	snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
		client->driver->driver.name, i2c_adapter_id(client->adapter),
		client->addr);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
304
305
306
/* Load an i2c sub-device: load its module, instantiate the i2c client
   (probing a list of addresses if info->addr is 0) and register the
   resulting subdev with v4l2_dev.  Returns the subdev or NULL; on
   failure the i2c client is unregistered again. */
struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
		struct i2c_adapter *adapter, struct i2c_board_info *info,
		const unsigned short *probe_addrs)
{
	struct v4l2_subdev *sd = NULL;
	struct i2c_client *client;

	BUG_ON(!v4l2_dev);

	request_module(I2C_MODULE_PREFIX "%s", info->type);

	/* Create the i2c client */
	if (info->addr == 0 && probe_addrs)
		client = i2c_new_probed_device(adapter, info, probe_addrs,
					       NULL);
	else
		client = i2c_new_device(adapter, info);

	/* Note: by loading the module first we are certain that c->driver
	   will be set if the driver was found. If the module was not loaded
	   first, then the i2c core tries to delay-load the module for us,
	   and then c->driver is still NULL until the module is finally
	   loaded. This delay-load mechanism doesn't work if other drivers
	   want to use the i2c device, so explicitly loading the module
	   is the best alternative. */
	if (client == NULL || client->driver == NULL)
		goto error;

	/* Lock the module so we can safely get the v4l2_subdev pointer */
	if (!try_module_get(client->driver->driver.owner))
		goto error;
	sd = i2c_get_clientdata(client);

	/* Register with the v4l2_device which increases the module's
	   use count as well. */
	if (v4l2_device_register_subdev(v4l2_dev, sd))
		sd = NULL;
	/* Decrease the module use count to match the first try_module_get. */
	module_put(client->driver->driver.owner);

error:
	/* If we have a client but no subdev, then something went wrong and
	   we must unregister the client. */
	if (client && sd == NULL)
		i2c_unregister_device(client);
	return sd;
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
356
357struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
358 struct i2c_adapter *adapter, const char *client_type,
359 u8 addr, const unsigned short *probe_addrs)
360{
361 struct i2c_board_info info;
362
363 /* Setup the i2c board info with the device type and
364 the device address. */
365 memset(&info, 0, sizeof(info));
366 strlcpy(info.type, client_type, sizeof(info.type));
367 info.addr = addr;
368
369 return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
370}
371EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
372
373/* Return i2c client address of v4l2_subdev. */
374unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
375{
376 struct i2c_client *client = v4l2_get_subdevdata(sd);
377
378 return client ? client->addr : I2C_CLIENT_END;
379}
380EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
381
/* Return a list of I2C tuner addresses to probe. Use only if the tuner
   addresses are unknown.  Each list is terminated by I2C_CLIENT_END. */
const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
{
	static const unsigned short radio_addrs[] = {
#if defined(CONFIG_MEDIA_TUNER_TEA5761) || defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE)
		/* 0x10 is probed only when the TEA5761 driver is built */
		0x10,
#endif
		0x60,
		I2C_CLIENT_END
	};
	static const unsigned short demod_addrs[] = {
		0x42, 0x43, 0x4a, 0x4b,
		I2C_CLIENT_END
	};
	static const unsigned short tv_addrs[] = {
		0x42, 0x43, 0x4a, 0x4b,		/* tda8290 */
		0x60, 0x61, 0x62, 0x63, 0x64,
		I2C_CLIENT_END
	};

	switch (type) {
	case ADDRS_RADIO:
		return radio_addrs;
	case ADDRS_DEMOD:
		return demod_addrs;
	case ADDRS_TV:
		return tv_addrs;
	case ADDRS_TV_WITH_DEMOD:
		/* skip the first four (demod) entries of tv_addrs */
		return tv_addrs + 4;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
416
417#endif /* defined(CONFIG_I2C) */
418
419#if defined(CONFIG_SPI)
420
/* Initialize a v4l2_subdev that is backed by an SPI device: link the two
   structures together and name the subdev after the SPI driver.
   (Counterpart of v4l2_i2c_subdev_init for SPI.) */

void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
		const struct v4l2_subdev_ops *ops)
{
	v4l2_subdev_init(sd, ops);
	sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
	/* the owner is the same as the spi_device's driver owner */
	sd->owner = spi->dev.driver->owner;
	/* spi_device and v4l2_subdev point to one another */
	v4l2_set_subdevdata(sd, spi);
	spi_set_drvdata(spi, sd);
	/* initialize name */
	strlcpy(sd->name, spi->dev.driver->name, sizeof(sd->name));
}
EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
437
/* Load an SPI sub-device: load its module, instantiate the spi device
   and register the resulting subdev with v4l2_dev.  Returns the subdev
   or NULL; on failure the spi device is unregistered again.
   (SPI counterpart of v4l2_i2c_new_subdev_board.) */
struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
		struct spi_master *master, struct spi_board_info *info)
{
	struct v4l2_subdev *sd = NULL;
	struct spi_device *spi = NULL;

	BUG_ON(!v4l2_dev);

	/* Load the driver module up front so spi->dev.driver is bound */
	if (info->modalias)
		request_module(info->modalias);

	spi = spi_new_device(master, info);

	if (spi == NULL || spi->dev.driver == NULL)
		goto error;

	/* Lock the module so we can safely get the v4l2_subdev pointer */
	if (!try_module_get(spi->dev.driver->owner))
		goto error;

	sd = spi_get_drvdata(spi);

	/* Register with the v4l2_device which increases the module's
	   use count as well. */
	if (v4l2_device_register_subdev(v4l2_dev, sd))
		sd = NULL;

	/* Decrease the module use count to match the first try_module_get. */
	module_put(spi->dev.driver->owner);

error:
	/* If we have a device but no subdev, then something went wrong and
	   we must unregister the device. */
	if (spi && sd == NULL)
		spi_unregister_device(spi);

	return sd;
}
EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
476
477#endif /* defined(CONFIG_SPI) */
478
/*
 * Clamp x to [min, max], rounded to a multiple of 2^align.  min and max
 * themselves don't have to be aligned, but at least one multiple of
 * 2^align must lie within the range (e.g. min=17, max=31, align=4 is
 * invalid: there is no multiple of 16 between 17 and 31).
 */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Mask that clears the low 'align' bits */
	unsigned int mask = ~((1 << align) - 1);
	unsigned int v = x;

	/* Round to the nearest aligned value */
	if (align)
		v = (v + (1 << (align - 1))) & mask;

	/* Pull back into range while keeping the alignment */
	if (v < min)
		return (min + ~mask) & mask;	/* min rounded up */
	if (v > max)
		return max & mask;		/* max rounded down */

	return v;
}
501
/* Bound an image to have a width between wmin and wmax, and height between
 * hmin and hmax, inclusive. Additionally, the width will be a multiple of
 * 2^walign, the height will be a multiple of 2^halign, and the overall size
 * (width*height) will be a multiple of 2^salign. The image may be shrunk
 * or enlarged to fit the alignment constraints.
 *
 * The width or height maximum must not be smaller than the corresponding
 * minimum. The alignments must not be so high there are no possible image
 * sizes within the allowed bounds. wmin and hmin must be at least 1
 * (don't use 0). If you don't care about a certain alignment, specify 0,
 * as 2^0 is 1 and one byte alignment is equivalent to no alignment. If
 * you only want to adjust downward, specify a maximum that's the same as
 * the initial value.
 */
void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
			   unsigned int walign,
			   u32 *h, unsigned int hmin, unsigned int hmax,
			   unsigned int halign, unsigned int salign)
{
	*w = clamp_align(*w, wmin, wmax, walign);
	*h = clamp_align(*h, hmin, hmax, halign);

	/* Usually we don't need to align the size and are done now. */
	if (!salign)
		return;

	/* How much alignment do we have?  __ffs gives the position of the
	 * lowest set bit, i.e. the number of trailing zero bits. */
	walign = __ffs(*w);
	halign = __ffs(*h);
	/* Enough to satisfy the image alignment? */
	if (walign + halign < salign) {
		/* Max walign where there is still a valid width */
		unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
		/* Max halign where there is still a valid height */
		unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

		/* up the smaller alignment until we have enough */
		do {
			if (halign >= hmaxa ||
			    (walign <= halign && walign < wmaxa)) {
				*w = clamp_align(*w, wmin, wmax, walign + 1);
				walign = __ffs(*w);
			} else {
				*h = clamp_align(*h, hmin, hmax, halign + 1);
				halign = __ffs(*h);
			}
		} while (halign + walign < salign);
	}
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
552
/**
 * v4l_fill_dv_preset_info - fill description of a digital video preset
 * @preset: preset value (index into the table below; must be less than
 *	the number of known presets)
 * @info: pointer to struct v4l2_dv_enum_preset to be filled in
 *
 * drivers can use this helper function to fill description of dv preset
 * in info.  Returns -EINVAL for a NULL info or an out-of-range preset.
 */
int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info)
{
	/* Table indexed by the V4L2_DV_* preset constants */
	static const struct v4l2_dv_preset_info {
		u16 width;
		u16 height;
		const char *name;
	} dv_presets[] = {
		{ 0, 0, "Invalid" },		/* V4L2_DV_INVALID */
		{ 720,  480, "480p@59.94" },	/* V4L2_DV_480P59_94 */
		{ 720,  576, "576p@50" },	/* V4L2_DV_576P50 */
		{ 1280, 720, "720p@24" },	/* V4L2_DV_720P24 */
		{ 1280, 720, "720p@25" },	/* V4L2_DV_720P25 */
		{ 1280, 720, "720p@30" },	/* V4L2_DV_720P30 */
		{ 1280, 720, "720p@50" },	/* V4L2_DV_720P50 */
		{ 1280, 720, "720p@59.94" },	/* V4L2_DV_720P59_94 */
		{ 1280, 720, "720p@60" },	/* V4L2_DV_720P60 */
		{ 1920, 1080, "1080i@29.97" },	/* V4L2_DV_1080I29_97 */
		{ 1920, 1080, "1080i@30" },	/* V4L2_DV_1080I30 */
		{ 1920, 1080, "1080i@25" },	/* V4L2_DV_1080I25 */
		{ 1920, 1080, "1080i@50" },	/* V4L2_DV_1080I50 */
		{ 1920, 1080, "1080i@60" },	/* V4L2_DV_1080I60 */
		{ 1920, 1080, "1080p@24" },	/* V4L2_DV_1080P24 */
		{ 1920, 1080, "1080p@25" },	/* V4L2_DV_1080P25 */
		{ 1920, 1080, "1080p@30" },	/* V4L2_DV_1080P30 */
		{ 1920, 1080, "1080p@50" },	/* V4L2_DV_1080P50 */
		{ 1920, 1080, "1080p@60" },	/* V4L2_DV_1080P60 */
	};

	if (info == NULL || preset >= ARRAY_SIZE(dv_presets))
		return -EINVAL;

	info->preset = preset;
	info->width = dv_presets[preset].width;
	info->height = dv_presets[preset].height;
	strlcpy(info->name, dv_presets[preset].name, sizeof(info->name));
	return 0;
}
EXPORT_SYMBOL_GPL(v4l_fill_dv_preset_info);
599
/* Find the discrete frame size in probe->sizes that is closest to
 * width x height, using the sum of the absolute width and height
 * differences as distance.  Returns NULL when probe is NULL; stops
 * early on an exact match. */
const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
		const struct v4l2_discrete_probe *probe,
		s32 width, s32 height)
{
	int i;
	u32 error, min_error = UINT_MAX;
	const struct v4l2_frmsize_discrete *size, *best = NULL;

	if (!probe)
		return best;

	for (i = 0, size = probe->sizes; i < probe->num_sizes; i++, size++) {
		/* L1 distance between candidate and requested size */
		error = abs(size->width - width) + abs(size->height - height);
		if (error < min_error) {
			min_error = error;
			best = size;
		}
		if (!error)
			break;
	}

	return best;
}
EXPORT_SYMBOL_GPL(v4l2_find_nearest_format);
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
new file mode 100644
index 000000000000..9ebd5c540d10
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -0,0 +1,1045 @@
1/*
2 * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
3 * Separated from fs stuff by Arnd Bergmann <arnd@arndb.de>
4 *
5 * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
6 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
7 * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
8 * Copyright (C) 2003 Pavel Machek (pavel@ucw.cz)
9 * Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be)
10 * Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
11 *
12 * These routines maintain argument size conversion between 32bit and 64bit
13 * ioctls.
14 */
15
16#include <linux/compat.h>
17#include <linux/module.h>
18#include <linux/videodev2.h>
19#include <media/v4l2-dev.h>
20#include <media/v4l2-ioctl.h>
21
22static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
23{
24 long ret = -ENOIOCTLCMD;
25
26 if (file->f_op->unlocked_ioctl)
27 ret = file->f_op->unlocked_ioctl(file, cmd, arg);
28
29 return ret;
30}
31
32
/* 32-bit layout of struct v4l2_clip: only the 'next' pointer shrinks
 * to a compat_caddr_t. */
struct v4l2_clip32 {
	struct v4l2_rect c;
	compat_caddr_t next;
};
37
/* 32-bit layout of struct v4l2_window; 'clips' and 'bitmap' are 32-bit
 * user-space pointers. */
struct v4l2_window32 {
	struct v4l2_rect w;
	__u32 field;	/* enum v4l2_field */
	__u32 chromakey;
	compat_caddr_t clips; /* actually struct v4l2_clip32 * */
	__u32 clipcount;
	compat_caddr_t bitmap;
};
46
/*
 * Convert a 32-bit v4l2_window to the native layout. The clip list is
 * rebuilt in a compat_alloc_user_space() area (still user space!) with
 * native-width 'next' pointers chaining the entries together.
 *
 * Returns 0, -EFAULT on a faulting user access, or -EINVAL for an
 * absurd clipcount.
 */
static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
{
	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
		copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
		get_user(kp->field, &up->field) ||
		get_user(kp->chromakey, &up->chromakey) ||
		get_user(kp->clipcount, &up->clipcount))
			return -EFAULT;
	/* Sanity bound: also keeps the compat_alloc_user_space() size small. */
	if (kp->clipcount > 2048)
		return -EINVAL;
	if (kp->clipcount) {
		struct v4l2_clip32 __user *uclips;
		struct v4l2_clip __user *kclips;
		int n = kp->clipcount;
		compat_caddr_t p;

		if (get_user(p, &up->clips))
			return -EFAULT;
		uclips = compat_ptr(p);
		kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
		kp->clips = kclips;
		while (--n >= 0) {
			/* user-to-user copy of the rectangle itself */
			if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
				return -EFAULT;
			/* relink: last entry terminates the list with NULL */
			if (put_user(n ? kclips + 1 : NULL, &kclips->next))
				return -EFAULT;
			uclips += 1;
			kclips += 1;
		}
	} else
		kp->clips = NULL;
	return 0;
}
80
81static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
82{
83 if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) ||
84 put_user(kp->field, &up->field) ||
85 put_user(kp->chromakey, &up->chromakey) ||
86 put_user(kp->clipcount, &up->clipcount))
87 return -EFAULT;
88 return 0;
89}
90
91static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
92{
93 if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
94 return -EFAULT;
95 return 0;
96}
97
98static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
99 struct v4l2_pix_format_mplane __user *up)
100{
101 if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
102 return -EFAULT;
103 return 0;
104}
105
106static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
107{
108 if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
109 return -EFAULT;
110 return 0;
111}
112
113static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
114 struct v4l2_pix_format_mplane __user *up)
115{
116 if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
117 return -EFAULT;
118 return 0;
119}
120
121static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
122{
123 if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
124 return -EFAULT;
125 return 0;
126}
127
128static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
129{
130 if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
131 return -EFAULT;
132 return 0;
133}
134
135static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
136{
137 if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
138 return -EFAULT;
139 return 0;
140}
141
142static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
143{
144 if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
145 return -EFAULT;
146 return 0;
147}
148
/* 32-bit layout of struct v4l2_format. Only the 'win' member differs
 * from the native struct (it embeds 32-bit pointers); the other union
 * members are layout-compatible. */
struct v4l2_format32 {
	__u32 type;	/* enum v4l2_buf_type */
	union {
		struct v4l2_pix_format pix;
		struct v4l2_pix_format_mplane pix_mp;
		struct v4l2_window32 win;
		struct v4l2_vbi_format vbi;
		struct v4l2_sliced_vbi_format sliced;
		__u8 raw_data[200]; /* user-defined */
	} fmt;
};
160
/**
 * struct v4l2_create_buffers32 - VIDIOC_CREATE_BUFS32 argument
 * @index:	on return, index of the first created buffer
 * @count:	entry: number of requested buffers,
 *		return: number of created buffers
 * @memory:	buffer memory type
 * @format:	frame format, for which buffers are requested
 * @reserved:	future extensions
 */
struct v4l2_create_buffers32 {
	__u32			index;
	__u32			count;
	__u32			memory;	/* enum v4l2_memory */
	struct v4l2_format32	format;
	__u32			reserved[8];
};
177
/*
 * Dispatch the format conversion on kp->type (already fetched by the
 * callers). Only the overlay types need real conversion work; the rest
 * are layout-compatible copies.
 */
static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
	switch (kp->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
						  &up->fmt.pix_mp);
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
		return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
	case V4L2_BUF_TYPE_PRIVATE:
		/* NOTE(review): this copies raw_data-sized bytes starting at
		 * the struct head (kp / up), not at fmt.raw_data — looks
		 * off by offsetof(fmt), though both structs share the same
		 * leading 'type' field; confirm against API users. */
		if (copy_from_user(kp, up, sizeof(kp->fmt.raw_data)))
			return -EFAULT;
		return 0;
	default:
		printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
								kp->type);
		return -EINVAL;
	}
}
207
208static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
209{
210 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
211 get_user(kp->type, &up->type))
212 return -EFAULT;
213 return __get_v4l2_format32(kp, up);
214}
215
/*
 * Convert a 32-bit v4l2_create_buffers. Everything up to format.fmt is
 * layout-compatible and copied in one go; the format union is converted
 * by __get_v4l2_format32() (format.type was just copied above).
 */
static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
{
	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
	    copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
			return -EFAULT;
	return __get_v4l2_format32(&kp->format, &up->format);
}
223
/*
 * Mirror of __get_v4l2_format32(): copy the format union back to the
 * 32-bit user struct, dispatching on kp->type.
 */
static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
	switch (kp->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
						  &up->fmt.pix_mp);
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
		return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
	case V4L2_BUF_TYPE_PRIVATE:
		/* NOTE(review): copies from the struct head rather than
		 * fmt.raw_data, same asymmetry as the get path — confirm. */
		if (copy_to_user(up, kp, sizeof(up->fmt.raw_data)))
			return -EFAULT;
		return 0;
	default:
		printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
								kp->type);
		return -EINVAL;
	}
}
253
254static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
255{
256 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) ||
257 put_user(kp->type, &up->type))
258 return -EFAULT;
259 return __put_v4l2_format32(kp, up);
260}
261
/*
 * Copy a v4l2_create_buffers result back to the 32-bit user struct:
 * the leading scalar fields in one block, then the format union via
 * __put_v4l2_format32().
 */
static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
{
	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format.fmt)))
			return -EFAULT;
	return __put_v4l2_format32(&kp->format, &up->format);
}
269
/* 32-bit layout of struct v4l2_standard. The v4l2_std_id (a __u64) is
 * split into two __u32 words because the 32-bit ABI aligns u64 fields
 * differently. */
struct v4l2_standard32 {
	__u32		     index;
	__u32		     id[2]; /* __u64 would get the alignment wrong */
	__u8		     name[24];
	struct v4l2_fract    frameperiod; /* Frames, not fields */
	__u32		     framelines;
	__u32		     reserved[4];
};
278
279static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
280{
281 /* other fields are not set by the user, nor used by the driver */
282 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
283 get_user(kp->index, &up->index))
284 return -EFAULT;
285 return 0;
286}
287
/*
 * Copy a driver-filled v4l2_standard back to its 32-bit form, field by
 * field (the id copy writes the native __u64 into the two-word id[]
 * array, which is the whole point of that layout).
 */
static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
{
	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
		put_user(kp->index, &up->index) ||
		copy_to_user(up->id, &kp->id, sizeof(__u64)) ||
		copy_to_user(up->name, kp->name, 24) ||
		copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
		put_user(kp->framelines, &up->framelines) ||
		copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
			return -EFAULT;
	return 0;
}
300
/* 32-bit layout of struct v4l2_plane: the userptr member of the union
 * shrinks to a compat_long_t. */
struct v4l2_plane32 {
	__u32			bytesused;
	__u32			length;
	union {
		__u32		mem_offset;
		compat_long_t	userptr;
	} m;
	__u32			data_offset;
	__u32			reserved[11];
};
311
/* 32-bit layout of struct v4l2_buffer: timestamp uses the compat
 * timeval, and the memory union members are 32-bit wide. */
struct v4l2_buffer32 {
	__u32			index;
	__u32			type;	/* enum v4l2_buf_type */
	__u32			bytesused;
	__u32			flags;
	__u32			field;	/* enum v4l2_field */
	struct compat_timeval	timestamp;
	struct v4l2_timecode	timecode;
	__u32			sequence;

	/* memory location */
	__u32			memory;	/* enum v4l2_memory */
	union {
		__u32           offset;
		compat_long_t   userptr;
		compat_caddr_t  planes;
	} m;
	__u32			length;
	__u32			reserved2;
	__u32			reserved;
};
333
/*
 * Convert one 32-bit plane into the native plane at *up. Despite the
 * names, BOTH pointers are user space: 'up' lives in the
 * compat_alloc_user_space() area set up by get_v4l2_buffer32().
 * The first two __u32 fields (bytesused, length) and data_offset are
 * copied verbatim; the memory union is widened according to @memory.
 */
static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
				enum v4l2_memory memory)
{
	void __user *up_pln;
	compat_long_t p;

	if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
		copy_in_user(&up->data_offset, &up32->data_offset,
				sizeof(__u32)))
		return -EFAULT;

	if (memory == V4L2_MEMORY_USERPTR) {
		/* widen the 32-bit user pointer to the native size */
		if (get_user(p, &up32->m.userptr))
			return -EFAULT;
		up_pln = compat_ptr(p);
		if (put_user((unsigned long)up_pln, &up->m.userptr))
			return -EFAULT;
	} else {
		/* MMAP/OVERLAY: mem_offset is a plain __u32 either way */
		if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
					sizeof(__u32)))
			return -EFAULT;
	}

	return 0;
}
359
/*
 * Copy one native plane back to its 32-bit counterpart (again, both
 * pointers are user space; 'up' is the compat_alloc_user_space() copy).
 */
static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
				enum v4l2_memory memory)
{
	if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
		copy_in_user(&up32->data_offset, &up->data_offset,
				sizeof(__u32)))
		return -EFAULT;

	/* For MMAP, driver might've set up the offset, so copy it back.
	 * USERPTR stays the same (was userspace-provided), so no copying. */
	if (memory == V4L2_MEMORY_MMAP)
		if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
					sizeof(__u32)))
			return -EFAULT;

	return 0;
}
377
/*
 * Convert a 32-bit v4l2_buffer to the native layout. For multi-planar
 * buffers the plane array is rebuilt in compat_alloc_user_space() and
 * kp->m.planes is pointed at that copy; for single-planar buffers only
 * the relevant union member for kp->memory is converted.
 */
static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
{
	struct v4l2_plane32 __user *uplane32;
	struct v4l2_plane __user *uplane;
	compat_caddr_t p;
	int num_planes;
	int ret;

	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
		get_user(kp->index, &up->index) ||
		get_user(kp->type, &up->type) ||
		get_user(kp->flags, &up->flags) ||
		get_user(kp->memory, &up->memory))
			return -EFAULT;

	/* Output buffers carry extra user-supplied metadata. */
	if (V4L2_TYPE_IS_OUTPUT(kp->type))
		if (get_user(kp->bytesused, &up->bytesused) ||
			get_user(kp->field, &up->field) ||
			get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
			get_user(kp->timestamp.tv_usec,
					&up->timestamp.tv_usec))
			return -EFAULT;

	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
		/* length holds the number of planes here */
		if (get_user(kp->length, &up->length))
			return -EFAULT;

		num_planes = kp->length;
		if (num_planes == 0) {
			kp->m.planes = NULL;
			/* num_planes == 0 is legal, e.g. when userspace doesn't
			 * need planes array on DQBUF*/
			return 0;
		}

		if (get_user(p, &up->m.planes))
			return -EFAULT;

		uplane32 = compat_ptr(p);
		if (!access_ok(VERIFY_READ, uplane32,
				num_planes * sizeof(struct v4l2_plane32)))
			return -EFAULT;

		/* We don't really care if userspace decides to kill itself
		 * by passing a very big num_planes value */
		uplane = compat_alloc_user_space(num_planes *
						sizeof(struct v4l2_plane));
		kp->m.planes = uplane;

		while (--num_planes >= 0) {
			ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
			if (ret)
				return ret;
			++uplane;
			++uplane32;
		}
	} else {
		/* single-planar: widen only the union member in use */
		switch (kp->memory) {
		case V4L2_MEMORY_MMAP:
			if (get_user(kp->length, &up->length) ||
				get_user(kp->m.offset, &up->m.offset))
				return -EFAULT;
			break;
		case V4L2_MEMORY_USERPTR:
			{
			compat_long_t tmp;

			if (get_user(kp->length, &up->length) ||
			    get_user(tmp, &up->m.userptr))
				return -EFAULT;

			kp->m.userptr = (unsigned long)compat_ptr(tmp);
			}
			break;
		case V4L2_MEMORY_OVERLAY:
			if (get_user(kp->m.offset, &up->m.offset))
				return -EFAULT;
			break;
		}
	}

	return 0;
}
461
/*
 * Copy a driver-filled v4l2_buffer back to its 32-bit form. The plane
 * array written back is the compat_alloc_user_space() copy built by
 * get_v4l2_buffer32(); the original 32-bit plane pointer is re-read
 * from the user struct.
 */
static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
{
	struct v4l2_plane32 __user *uplane32;
	struct v4l2_plane __user *uplane;
	compat_caddr_t p;
	int num_planes;
	int ret;

	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
		put_user(kp->index, &up->index) ||
		put_user(kp->type, &up->type) ||
		put_user(kp->flags, &up->flags) ||
		put_user(kp->memory, &up->memory))
			return -EFAULT;

	if (put_user(kp->bytesused, &up->bytesused) ||
		put_user(kp->field, &up->field) ||
		put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
		put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
		copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
		put_user(kp->sequence, &up->sequence) ||
		put_user(kp->reserved2, &up->reserved2) ||
		put_user(kp->reserved, &up->reserved))
			return -EFAULT;

	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
		num_planes = kp->length;
		if (num_planes == 0)
			return 0;

		uplane = kp->m.planes;
		if (get_user(p, &up->m.planes))
			return -EFAULT;
		uplane32 = compat_ptr(p);

		while (--num_planes >= 0) {
			ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
			if (ret)
				return ret;
			++uplane;
			++uplane32;
		}
	} else {
		/* single-planar: narrow only the union member in use */
		switch (kp->memory) {
		case V4L2_MEMORY_MMAP:
			if (put_user(kp->length, &up->length) ||
				put_user(kp->m.offset, &up->m.offset))
				return -EFAULT;
			break;
		case V4L2_MEMORY_USERPTR:
			if (put_user(kp->length, &up->length) ||
				put_user(kp->m.userptr, &up->m.userptr))
				return -EFAULT;
			break;
		case V4L2_MEMORY_OVERLAY:
			if (put_user(kp->m.offset, &up->m.offset))
				return -EFAULT;
			break;
		}
	}

	return 0;
}
525
/* 32-bit layout of struct v4l2_framebuffer: 'base' is a 32-bit pointer. */
struct v4l2_framebuffer32 {
	__u32			capability;
	__u32			flags;
	compat_caddr_t 		base;
	struct v4l2_pix_format	fmt;
};
532
533static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
534{
535 u32 tmp;
536
537 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
538 get_user(tmp, &up->base) ||
539 get_user(kp->capability, &up->capability) ||
540 get_user(kp->flags, &up->flags))
541 return -EFAULT;
542 kp->base = compat_ptr(tmp);
543 get_v4l2_pix_format(&kp->fmt, &up->fmt);
544 return 0;
545}
546
547static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
548{
549 u32 tmp = (u32)((unsigned long)kp->base);
550
551 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
552 put_user(tmp, &up->base) ||
553 put_user(kp->capability, &up->capability) ||
554 put_user(kp->flags, &up->flags))
555 return -EFAULT;
556 put_v4l2_pix_format(&kp->fmt, &up->fmt);
557 return 0;
558}
559
/* 32-bit layout of struct v4l2_input; packed because the 32-bit ABI
 * does not pad the __u64 std to 8-byte alignment. */
struct v4l2_input32 {
	__u32	     index;		/*  Which input */
	__u8	     name[32];		/*  Label */
	__u32	     type;		/*  Type of input */
	__u32	     audioset;		/*  Associated audios (bitfield) */
	__u32        tuner;             /*  Associated tuner */
	v4l2_std_id  std;
	__u32	     status;
	__u32	     reserved[4];
} __attribute__ ((packed));
570
571/* The 64-bit v4l2_input struct has extra padding at the end of the struct.
572 Otherwise it is identical to the 32-bit version. */
573static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
574{
575 if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
576 return -EFAULT;
577 return 0;
578}
579
580static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
581{
582 if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
583 return -EFAULT;
584 return 0;
585}
586
/* 32-bit layout of struct v4l2_ext_controls: the controls array pointer
 * shrinks to a compat_caddr_t. */
struct v4l2_ext_controls32 {
       __u32 ctrl_class;
       __u32 count;
       __u32 error_idx;
       __u32 reserved[2];
       compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
};

/* 32-bit layout of struct v4l2_ext_control; packed because the 32-bit
 * ABI does not pad the union's __s64/pointer member to 8 bytes. */
struct v4l2_ext_control32 {
	__u32 id;
	__u32 size;
	__u32 reserved2[1];
	union {
		__s32 value;
		__s64 value64;
		compat_caddr_t string; /* actually char * */
	};
} __attribute__ ((packed));
605
606/* The following function really belong in v4l2-common, but that causes
607 a circular dependency between modules. We need to think about this, but
608 for now this will do. */
609
610/* Return non-zero if this control is a pointer type. Currently only
611 type STRING is a pointer type. */
612static inline int ctrl_is_pointer(u32 id)
613{
614 switch (id) {
615 case V4L2_CID_RDS_TX_PS_NAME:
616 case V4L2_CID_RDS_TX_RADIO_TEXT:
617 return 1;
618 default:
619 return 0;
620 }
621}
622
623static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
624{
625 struct v4l2_ext_control32 __user *ucontrols;
626 struct v4l2_ext_control __user *kcontrols;
627 int n;
628 compat_caddr_t p;
629
630 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
631 get_user(kp->ctrl_class, &up->ctrl_class) ||
632 get_user(kp->count, &up->count) ||
633 get_user(kp->error_idx, &up->error_idx) ||
634 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
635 return -EFAULT;
636 n = kp->count;
637 if (n == 0) {
638 kp->controls = NULL;
639 return 0;
640 }
641 if (get_user(p, &up->controls))
642 return -EFAULT;
643 ucontrols = compat_ptr(p);
644 if (!access_ok(VERIFY_READ, ucontrols,
645 n * sizeof(struct v4l2_ext_control32)))
646 return -EFAULT;
647 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
648 kp->controls = kcontrols;
649 while (--n >= 0) {
650 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
651 return -EFAULT;
652 if (ctrl_is_pointer(kcontrols->id)) {
653 void __user *s;
654
655 if (get_user(p, &ucontrols->string))
656 return -EFAULT;
657 s = compat_ptr(p);
658 if (put_user(s, &kcontrols->string))
659 return -EFAULT;
660 }
661 ucontrols++;
662 kcontrols++;
663 }
664 return 0;
665}
666
667static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
668{
669 struct v4l2_ext_control32 __user *ucontrols;
670 struct v4l2_ext_control __user *kcontrols = kp->controls;
671 int n = kp->count;
672 compat_caddr_t p;
673
674 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
675 put_user(kp->ctrl_class, &up->ctrl_class) ||
676 put_user(kp->count, &up->count) ||
677 put_user(kp->error_idx, &up->error_idx) ||
678 copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
679 return -EFAULT;
680 if (!kp->count)
681 return 0;
682
683 if (get_user(p, &up->controls))
684 return -EFAULT;
685 ucontrols = compat_ptr(p);
686 if (!access_ok(VERIFY_WRITE, ucontrols,
687 n * sizeof(struct v4l2_ext_control32)))
688 return -EFAULT;
689
690 while (--n >= 0) {
691 unsigned size = sizeof(*ucontrols);
692
693 /* Do not modify the pointer when copying a pointer control.
694 The contents of the pointer was changed, not the pointer
695 itself. */
696 if (ctrl_is_pointer(kcontrols->id))
697 size -= sizeof(ucontrols->value64);
698 if (copy_in_user(ucontrols, kcontrols, size))
699 return -EFAULT;
700 ucontrols++;
701 kcontrols++;
702 }
703 return 0;
704}
705
/* 32-bit layout of struct v4l2_event: timestamp uses compat_timespec;
 * the payload union is opaque bytes here. */
struct v4l2_event32 {
	__u32				type;
	union {
		__u8			data[64];
	} u;
	__u32				pending;
	__u32				sequence;
	struct compat_timespec		timestamp;
	__u32				id;
	__u32				reserved[8];
};
717
/*
 * Copy a dequeued v4l2_event to its 32-bit form (VIDIOC_DQEVENT is
 * read-only from user space, so there is no matching get function).
 */
static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
{
	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
		put_user(kp->type, &up->type) ||
		copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
		put_user(kp->pending, &up->pending) ||
		put_user(kp->sequence, &up->sequence) ||
		put_compat_timespec(&kp->timestamp, &up->timestamp) ||
		put_user(kp->id, &up->id) ||
		copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
			return -EFAULT;
	return 0;
}
731
/* ioctl numbers as seen from a 32-bit process: same 'V' command codes,
 * but sized on the compat structure layouts above. */
#define VIDIOC_G_FMT32		_IOWR('V',  4, struct v4l2_format32)
#define VIDIOC_S_FMT32		_IOWR('V',  5, struct v4l2_format32)
#define VIDIOC_QUERYBUF32	_IOWR('V',  9, struct v4l2_buffer32)
#define VIDIOC_G_FBUF32		_IOR ('V', 10, struct v4l2_framebuffer32)
#define VIDIOC_S_FBUF32		_IOW ('V', 11, struct v4l2_framebuffer32)
#define VIDIOC_QBUF32		_IOWR('V', 15, struct v4l2_buffer32)
#define VIDIOC_DQBUF32		_IOWR('V', 17, struct v4l2_buffer32)
#define VIDIOC_ENUMSTD32	_IOWR('V', 25, struct v4l2_standard32)
#define VIDIOC_ENUMINPUT32	_IOWR('V', 26, struct v4l2_input32)
#define VIDIOC_TRY_FMT32	_IOWR('V', 64, struct v4l2_format32)
#define VIDIOC_G_EXT_CTRLS32    _IOWR('V', 71, struct v4l2_ext_controls32)
#define VIDIOC_S_EXT_CTRLS32    _IOWR('V', 72, struct v4l2_ext_controls32)
#define VIDIOC_TRY_EXT_CTRLS32  _IOWR('V', 73, struct v4l2_ext_controls32)
#define	VIDIOC_DQEVENT32	_IOR ('V', 89, struct v4l2_event32)
#define VIDIOC_CREATE_BUFS32	_IOWR('V', 92, struct v4l2_create_buffers32)
#define VIDIOC_PREPARE_BUF32	_IOWR('V', 93, struct v4l2_buffer32)

/* These carry a bare 32-bit integer argument; the command number still
 * differs from the native one because sizeof(s32) != sizeof(long)
 * matters only for the structure-carrying ioctls, but the compat layer
 * converts the value explicitly in do_video_ioctl(). */
#define VIDIOC_OVERLAY32	_IOW ('V', 14, s32)
#define VIDIOC_STREAMON32	_IOW ('V', 18, s32)
#define VIDIOC_STREAMOFF32	_IOW ('V', 19, s32)
#define VIDIOC_G_INPUT32	_IOR ('V', 38, s32)
#define VIDIOC_S_INPUT32	_IOWR('V', 39, s32)
#define VIDIOC_G_OUTPUT32	_IOR ('V', 46, s32)
#define VIDIOC_S_OUTPUT32	_IOWR('V', 47, s32)
756
/*
 * Core of the compat layer: translate the 32-bit command number to the
 * native one, convert the argument into a kernel-side union, issue the
 * native ioctl under set_fs(KERNEL_DS) (so the driver's copy_*_user
 * calls hit 'karg'), then convert the result back out.
 *
 * 'compatible_arg' stays 1 for commands whose argument layout is
 * identical on both ABIs; those are passed straight through.
 */
static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	union {
		struct v4l2_format v2f;
		struct v4l2_buffer v2b;
		struct v4l2_framebuffer v2fb;
		struct v4l2_input v2i;
		struct v4l2_standard v2s;
		struct v4l2_ext_controls v2ecs;
		struct v4l2_event v2ev;
		struct v4l2_create_buffers v2crt;
		unsigned long vx;
		int vi;
	} karg;
	void __user *up = compat_ptr(arg);
	int compatible_arg = 1;
	long err = 0;

	/* First, convert the command. */
	switch (cmd) {
	case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
	case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break;
	case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break;
	case VIDIOC_G_FBUF32: cmd = VIDIOC_G_FBUF; break;
	case VIDIOC_S_FBUF32: cmd = VIDIOC_S_FBUF; break;
	case VIDIOC_QBUF32: cmd = VIDIOC_QBUF; break;
	case VIDIOC_DQBUF32: cmd = VIDIOC_DQBUF; break;
	case VIDIOC_ENUMSTD32: cmd = VIDIOC_ENUMSTD; break;
	case VIDIOC_ENUMINPUT32: cmd = VIDIOC_ENUMINPUT; break;
	case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break;
	case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
	case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
	case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
	case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break;
	case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
	case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
	case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
	case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break;
	case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
	case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break;
	case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
	case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;
	case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break;
	}

	/* Second, convert the incoming argument, if any. */
	switch (cmd) {
	case VIDIOC_OVERLAY:
	case VIDIOC_STREAMON:
	case VIDIOC_STREAMOFF:
	case VIDIOC_S_INPUT:
	case VIDIOC_S_OUTPUT:
		err = get_user(karg.vi, (s32 __user *)up);
		compatible_arg = 0;
		break;

	case VIDIOC_G_INPUT:
	case VIDIOC_G_OUTPUT:
		/* pure output commands: nothing to read, but the s32
		 * result must be converted back, so not compatible */
		compatible_arg = 0;
		break;

	case VIDIOC_G_FMT:
	case VIDIOC_S_FMT:
	case VIDIOC_TRY_FMT:
		err = get_v4l2_format32(&karg.v2f, up);
		compatible_arg = 0;
		break;

	case VIDIOC_CREATE_BUFS:
		err = get_v4l2_create32(&karg.v2crt, up);
		compatible_arg = 0;
		break;

	case VIDIOC_PREPARE_BUF:
	case VIDIOC_QUERYBUF:
	case VIDIOC_QBUF:
	case VIDIOC_DQBUF:
		err = get_v4l2_buffer32(&karg.v2b, up);
		compatible_arg = 0;
		break;

	case VIDIOC_S_FBUF:
		err = get_v4l2_framebuffer32(&karg.v2fb, up);
		compatible_arg = 0;
		break;

	case VIDIOC_G_FBUF:
		compatible_arg = 0;
		break;

	case VIDIOC_ENUMSTD:
		err = get_v4l2_standard32(&karg.v2s, up);
		compatible_arg = 0;
		break;

	case VIDIOC_ENUMINPUT:
		err = get_v4l2_input32(&karg.v2i, up);
		compatible_arg = 0;
		break;

	case VIDIOC_G_EXT_CTRLS:
	case VIDIOC_S_EXT_CTRLS:
	case VIDIOC_TRY_EXT_CTRLS:
		err = get_v4l2_ext_controls32(&karg.v2ecs, up);
		compatible_arg = 0;
		break;
	case VIDIOC_DQEVENT:
		compatible_arg = 0;
		break;
	}
	if (err)
		return err;

	if (compatible_arg)
		err = native_ioctl(file, cmd, (unsigned long)up);
	else {
		/* Point the uaccess machinery at kernel memory so the
		 * driver's copy_{from,to}_user operate on karg. */
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);
		err = native_ioctl(file, cmd, (unsigned long)&karg);
		set_fs(old_fs);
	}

	/* Special case: even after an error we need to put the
	   results back for these ioctls since the error_idx will
	   contain information on which control failed. */
	switch (cmd) {
	case VIDIOC_G_EXT_CTRLS:
	case VIDIOC_S_EXT_CTRLS:
	case VIDIOC_TRY_EXT_CTRLS:
		if (put_v4l2_ext_controls32(&karg.v2ecs, up))
			err = -EFAULT;
		break;
	}
	if (err)
		return err;

	/* Third, convert the result back to the 32-bit layout. */
	switch (cmd) {
	case VIDIOC_S_INPUT:
	case VIDIOC_S_OUTPUT:
	case VIDIOC_G_INPUT:
	case VIDIOC_G_OUTPUT:
		err = put_user(((s32)karg.vi), (s32 __user *)up);
		break;

	case VIDIOC_G_FBUF:
		err = put_v4l2_framebuffer32(&karg.v2fb, up);
		break;

	case VIDIOC_DQEVENT:
		err = put_v4l2_event32(&karg.v2ev, up);
		break;

	case VIDIOC_G_FMT:
	case VIDIOC_S_FMT:
	case VIDIOC_TRY_FMT:
		err = put_v4l2_format32(&karg.v2f, up);
		break;

	case VIDIOC_CREATE_BUFS:
		err = put_v4l2_create32(&karg.v2crt, up);
		break;

	case VIDIOC_QUERYBUF:
	case VIDIOC_QBUF:
	case VIDIOC_DQBUF:
		err = put_v4l2_buffer32(&karg.v2b, up);
		break;

	case VIDIOC_ENUMSTD:
		err = put_v4l2_standard32(&karg.v2s, up);
		break;

	case VIDIOC_ENUMINPUT:
		err = put_v4l2_input32(&karg.v2i, up);
		break;
	}
	return err;
}
935
/*
 * v4l2_compat_ioctl32 - compat_ioctl entry point for V4L2 devices.
 *
 * Every ioctl the core knows how to handle (both the layout-identical
 * ones and the *32 variants needing conversion) is routed through
 * do_video_ioctl(); anything else is offered to the driver's own
 * compat_ioctl32 handler, and a warning is logged if nobody takes it.
 */
long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	long ret = -ENOIOCTLCMD;

	if (!file->f_op->unlocked_ioctl)
		return ret;

	switch (cmd) {
	case VIDIOC_QUERYCAP:
	case VIDIOC_RESERVED:
	case VIDIOC_ENUM_FMT:
	case VIDIOC_G_FMT32:
	case VIDIOC_S_FMT32:
	case VIDIOC_REQBUFS:
	case VIDIOC_QUERYBUF32:
	case VIDIOC_G_FBUF32:
	case VIDIOC_S_FBUF32:
	case VIDIOC_OVERLAY32:
	case VIDIOC_QBUF32:
	case VIDIOC_DQBUF32:
	case VIDIOC_STREAMON32:
	case VIDIOC_STREAMOFF32:
	case VIDIOC_G_PARM:
	case VIDIOC_S_PARM:
	case VIDIOC_G_STD:
	case VIDIOC_S_STD:
	case VIDIOC_ENUMSTD32:
	case VIDIOC_ENUMINPUT32:
	case VIDIOC_G_CTRL:
	case VIDIOC_S_CTRL:
	case VIDIOC_G_TUNER:
	case VIDIOC_S_TUNER:
	case VIDIOC_G_AUDIO:
	case VIDIOC_S_AUDIO:
	case VIDIOC_QUERYCTRL:
	case VIDIOC_QUERYMENU:
	case VIDIOC_G_INPUT32:
	case VIDIOC_S_INPUT32:
	case VIDIOC_G_OUTPUT32:
	case VIDIOC_S_OUTPUT32:
	case VIDIOC_ENUMOUTPUT:
	case VIDIOC_G_AUDOUT:
	case VIDIOC_S_AUDOUT:
	case VIDIOC_G_MODULATOR:
	case VIDIOC_S_MODULATOR:
	case VIDIOC_S_FREQUENCY:
	case VIDIOC_G_FREQUENCY:
	case VIDIOC_CROPCAP:
	case VIDIOC_G_CROP:
	case VIDIOC_S_CROP:
	case VIDIOC_G_SELECTION:
	case VIDIOC_S_SELECTION:
	case VIDIOC_G_JPEGCOMP:
	case VIDIOC_S_JPEGCOMP:
	case VIDIOC_QUERYSTD:
	case VIDIOC_TRY_FMT32:
	case VIDIOC_ENUMAUDIO:
	case VIDIOC_ENUMAUDOUT:
	case VIDIOC_G_PRIORITY:
	case VIDIOC_S_PRIORITY:
	case VIDIOC_G_SLICED_VBI_CAP:
	case VIDIOC_LOG_STATUS:
	case VIDIOC_G_EXT_CTRLS32:
	case VIDIOC_S_EXT_CTRLS32:
	case VIDIOC_TRY_EXT_CTRLS32:
	case VIDIOC_ENUM_FRAMESIZES:
	case VIDIOC_ENUM_FRAMEINTERVALS:
	case VIDIOC_G_ENC_INDEX:
	case VIDIOC_ENCODER_CMD:
	case VIDIOC_TRY_ENCODER_CMD:
	case VIDIOC_DECODER_CMD:
	case VIDIOC_TRY_DECODER_CMD:
	case VIDIOC_DBG_S_REGISTER:
	case VIDIOC_DBG_G_REGISTER:
	case VIDIOC_DBG_G_CHIP_IDENT:
	case VIDIOC_S_HW_FREQ_SEEK:
	case VIDIOC_ENUM_DV_PRESETS:
	case VIDIOC_S_DV_PRESET:
	case VIDIOC_G_DV_PRESET:
	case VIDIOC_QUERY_DV_PRESET:
	case VIDIOC_S_DV_TIMINGS:
	case VIDIOC_G_DV_TIMINGS:
	case VIDIOC_DQEVENT:
	case VIDIOC_DQEVENT32:
	case VIDIOC_SUBSCRIBE_EVENT:
	case VIDIOC_UNSUBSCRIBE_EVENT:
	case VIDIOC_CREATE_BUFS32:
	case VIDIOC_PREPARE_BUF32:
	case VIDIOC_ENUM_DV_TIMINGS:
	case VIDIOC_QUERY_DV_TIMINGS:
	case VIDIOC_DV_TIMINGS_CAP:
	case VIDIOC_ENUM_FREQ_BANDS:
		ret = do_video_ioctl(file, cmd, arg);
		break;

	default:
		/* Unknown to the core: let the driver try first. */
		if (vdev->fops->compat_ioctl32)
			ret = vdev->fops->compat_ioctl32(file, cmd, arg);

		if (ret == -ENOIOCTLCMD)
			printk(KERN_WARNING "compat_ioctl32: "
				"unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
				_IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd),
				cmd);
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
new file mode 100644
index 000000000000..b6a2ee71e5c3
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -0,0 +1,2651 @@
1/*
2 V4L2 controls framework implementation.
3
4 Copyright (C) 2010 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/ctype.h>
22#include <linux/slab.h>
23#include <linux/export.h>
24#include <media/v4l2-ioctl.h>
25#include <media/v4l2-device.h>
26#include <media/v4l2-ctrls.h>
27#include <media/v4l2-event.h>
28#include <media/v4l2-dev.h>
29
/* True if @master's ops table exists and defines callback @op. */
#define has_op(master, op) \
	(master->ops && master->ops->op)
/* Invoke @op on @master when defined; otherwise report success (0). */
#define call_op(master, op) \
	(has_op(master, op) ? master->ops->op(master) : 0)
34
/* Internal temporary helper struct, one for each v4l2_ext_control passed
   to VIDIOC_G/S/TRY_EXT_CTRLS. Built up before the controls are applied. */
struct v4l2_ctrl_helper {
	/* Pointer to the control reference of the master control */
	struct v4l2_ctrl_ref *mref;
	/* The control corresponding to the v4l2_ext_control ID field. */
	struct v4l2_ctrl *ctrl;
	/* v4l2_ext_control index of the next control belonging to the
	   same cluster, or 0 if there isn't any. */
	u32 next;
};
45
46/* Small helper function to determine if the autocluster is set to manual
47 mode. */
48static bool is_cur_manual(const struct v4l2_ctrl *master)
49{
50 return master->is_auto && master->cur.val == master->manual_mode_value;
51}
52
53/* Same as above, but this checks the against the new value instead of the
54 current value. */
55static bool is_new_manual(const struct v4l2_ctrl *master)
56{
57 return master->is_auto && master->val == master->manual_mode_value;
58}
59
60/* Returns NULL or a character pointer array containing the menu for
61 the given control ID. The pointer array ends with a NULL pointer.
62 An empty string signifies a menu entry that is invalid. This allows
63 drivers to disable certain options if it is not supported. */
64const char * const *v4l2_ctrl_get_menu(u32 id)
65{
66 static const char * const mpeg_audio_sampling_freq[] = {
67 "44.1 kHz",
68 "48 kHz",
69 "32 kHz",
70 NULL
71 };
72 static const char * const mpeg_audio_encoding[] = {
73 "MPEG-1/2 Layer I",
74 "MPEG-1/2 Layer II",
75 "MPEG-1/2 Layer III",
76 "MPEG-2/4 AAC",
77 "AC-3",
78 NULL
79 };
80 static const char * const mpeg_audio_l1_bitrate[] = {
81 "32 kbps",
82 "64 kbps",
83 "96 kbps",
84 "128 kbps",
85 "160 kbps",
86 "192 kbps",
87 "224 kbps",
88 "256 kbps",
89 "288 kbps",
90 "320 kbps",
91 "352 kbps",
92 "384 kbps",
93 "416 kbps",
94 "448 kbps",
95 NULL
96 };
97 static const char * const mpeg_audio_l2_bitrate[] = {
98 "32 kbps",
99 "48 kbps",
100 "56 kbps",
101 "64 kbps",
102 "80 kbps",
103 "96 kbps",
104 "112 kbps",
105 "128 kbps",
106 "160 kbps",
107 "192 kbps",
108 "224 kbps",
109 "256 kbps",
110 "320 kbps",
111 "384 kbps",
112 NULL
113 };
114 static const char * const mpeg_audio_l3_bitrate[] = {
115 "32 kbps",
116 "40 kbps",
117 "48 kbps",
118 "56 kbps",
119 "64 kbps",
120 "80 kbps",
121 "96 kbps",
122 "112 kbps",
123 "128 kbps",
124 "160 kbps",
125 "192 kbps",
126 "224 kbps",
127 "256 kbps",
128 "320 kbps",
129 NULL
130 };
131 static const char * const mpeg_audio_ac3_bitrate[] = {
132 "32 kbps",
133 "40 kbps",
134 "48 kbps",
135 "56 kbps",
136 "64 kbps",
137 "80 kbps",
138 "96 kbps",
139 "112 kbps",
140 "128 kbps",
141 "160 kbps",
142 "192 kbps",
143 "224 kbps",
144 "256 kbps",
145 "320 kbps",
146 "384 kbps",
147 "448 kbps",
148 "512 kbps",
149 "576 kbps",
150 "640 kbps",
151 NULL
152 };
153 static const char * const mpeg_audio_mode[] = {
154 "Stereo",
155 "Joint Stereo",
156 "Dual",
157 "Mono",
158 NULL
159 };
160 static const char * const mpeg_audio_mode_extension[] = {
161 "Bound 4",
162 "Bound 8",
163 "Bound 12",
164 "Bound 16",
165 NULL
166 };
167 static const char * const mpeg_audio_emphasis[] = {
168 "No Emphasis",
169 "50/15 us",
170 "CCITT J17",
171 NULL
172 };
173 static const char * const mpeg_audio_crc[] = {
174 "No CRC",
175 "16-bit CRC",
176 NULL
177 };
178 static const char * const mpeg_audio_dec_playback[] = {
179 "Auto",
180 "Stereo",
181 "Left",
182 "Right",
183 "Mono",
184 "Swapped Stereo",
185 NULL
186 };
187 static const char * const mpeg_video_encoding[] = {
188 "MPEG-1",
189 "MPEG-2",
190 "MPEG-4 AVC",
191 NULL
192 };
193 static const char * const mpeg_video_aspect[] = {
194 "1x1",
195 "4x3",
196 "16x9",
197 "2.21x1",
198 NULL
199 };
200 static const char * const mpeg_video_bitrate_mode[] = {
201 "Variable Bitrate",
202 "Constant Bitrate",
203 NULL
204 };
205 static const char * const mpeg_stream_type[] = {
206 "MPEG-2 Program Stream",
207 "MPEG-2 Transport Stream",
208 "MPEG-1 System Stream",
209 "MPEG-2 DVD-compatible Stream",
210 "MPEG-1 VCD-compatible Stream",
211 "MPEG-2 SVCD-compatible Stream",
212 NULL
213 };
214 static const char * const mpeg_stream_vbi_fmt[] = {
215 "No VBI",
216 "Private Packet, IVTV Format",
217 NULL
218 };
219 static const char * const camera_power_line_frequency[] = {
220 "Disabled",
221 "50 Hz",
222 "60 Hz",
223 "Auto",
224 NULL
225 };
226 static const char * const camera_exposure_auto[] = {
227 "Auto Mode",
228 "Manual Mode",
229 "Shutter Priority Mode",
230 "Aperture Priority Mode",
231 NULL
232 };
233 static const char * const camera_exposure_metering[] = {
234 "Average",
235 "Center Weighted",
236 "Spot",
237 NULL
238 };
239 static const char * const camera_auto_focus_range[] = {
240 "Auto",
241 "Normal",
242 "Macro",
243 "Infinity",
244 NULL
245 };
246 static const char * const colorfx[] = {
247 "None",
248 "Black & White",
249 "Sepia",
250 "Negative",
251 "Emboss",
252 "Sketch",
253 "Sky Blue",
254 "Grass Green",
255 "Skin Whiten",
256 "Vivid",
257 "Aqua",
258 "Art Freeze",
259 "Silhouette",
260 "Solarization",
261 "Antique",
262 "Set Cb/Cr",
263 NULL
264 };
265 static const char * const auto_n_preset_white_balance[] = {
266 "Manual",
267 "Auto",
268 "Incandescent",
269 "Fluorescent",
270 "Fluorescent H",
271 "Horizon",
272 "Daylight",
273 "Flash",
274 "Cloudy",
275 "Shade",
276 NULL,
277 };
278 static const char * const camera_iso_sensitivity_auto[] = {
279 "Manual",
280 "Auto",
281 NULL
282 };
283 static const char * const scene_mode[] = {
284 "None",
285 "Backlight",
286 "Beach/Snow",
287 "Candle Light",
288 "Dusk/Dawn",
289 "Fall Colors",
290 "Fireworks",
291 "Landscape",
292 "Night",
293 "Party/Indoor",
294 "Portrait",
295 "Sports",
296 "Sunset",
297 "Text",
298 NULL
299 };
300 static const char * const tune_preemphasis[] = {
301 "No Preemphasis",
302 "50 Microseconds",
303 "75 Microseconds",
304 NULL,
305 };
306 static const char * const header_mode[] = {
307 "Separate Buffer",
308 "Joined With 1st Frame",
309 NULL,
310 };
311 static const char * const multi_slice[] = {
312 "Single",
313 "Max Macroblocks",
314 "Max Bytes",
315 NULL,
316 };
317 static const char * const entropy_mode[] = {
318 "CAVLC",
319 "CABAC",
320 NULL,
321 };
322 static const char * const mpeg_h264_level[] = {
323 "1",
324 "1b",
325 "1.1",
326 "1.2",
327 "1.3",
328 "2",
329 "2.1",
330 "2.2",
331 "3",
332 "3.1",
333 "3.2",
334 "4",
335 "4.1",
336 "4.2",
337 "5",
338 "5.1",
339 NULL,
340 };
341 static const char * const h264_loop_filter[] = {
342 "Enabled",
343 "Disabled",
344 "Disabled at Slice Boundary",
345 NULL,
346 };
347 static const char * const h264_profile[] = {
348 "Baseline",
349 "Constrained Baseline",
350 "Main",
351 "Extended",
352 "High",
353 "High 10",
354 "High 422",
355 "High 444 Predictive",
356 "High 10 Intra",
357 "High 422 Intra",
358 "High 444 Intra",
359 "CAVLC 444 Intra",
360 "Scalable Baseline",
361 "Scalable High",
362 "Scalable High Intra",
363 "Multiview High",
364 NULL,
365 };
366 static const char * const vui_sar_idc[] = {
367 "Unspecified",
368 "1:1",
369 "12:11",
370 "10:11",
371 "16:11",
372 "40:33",
373 "24:11",
374 "20:11",
375 "32:11",
376 "80:33",
377 "18:11",
378 "15:11",
379 "64:33",
380 "160:99",
381 "4:3",
382 "3:2",
383 "2:1",
384 "Extended SAR",
385 NULL,
386 };
387 static const char * const mpeg_mpeg4_level[] = {
388 "0",
389 "0b",
390 "1",
391 "2",
392 "3",
393 "3b",
394 "4",
395 "5",
396 NULL,
397 };
398 static const char * const mpeg4_profile[] = {
399 "Simple",
400 "Advanced Simple",
401 "Core",
402 "Simple Scalable",
403 "Advanced Coding Efficency",
404 NULL,
405 };
406
407 static const char * const flash_led_mode[] = {
408 "Off",
409 "Flash",
410 "Torch",
411 NULL,
412 };
413 static const char * const flash_strobe_source[] = {
414 "Software",
415 "External",
416 NULL,
417 };
418
419 static const char * const jpeg_chroma_subsampling[] = {
420 "4:4:4",
421 "4:2:2",
422 "4:2:0",
423 "4:1:1",
424 "4:1:0",
425 "Gray",
426 NULL,
427 };
428
429 switch (id) {
430 case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
431 return mpeg_audio_sampling_freq;
432 case V4L2_CID_MPEG_AUDIO_ENCODING:
433 return mpeg_audio_encoding;
434 case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
435 return mpeg_audio_l1_bitrate;
436 case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
437 return mpeg_audio_l2_bitrate;
438 case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
439 return mpeg_audio_l3_bitrate;
440 case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
441 return mpeg_audio_ac3_bitrate;
442 case V4L2_CID_MPEG_AUDIO_MODE:
443 return mpeg_audio_mode;
444 case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
445 return mpeg_audio_mode_extension;
446 case V4L2_CID_MPEG_AUDIO_EMPHASIS:
447 return mpeg_audio_emphasis;
448 case V4L2_CID_MPEG_AUDIO_CRC:
449 return mpeg_audio_crc;
450 case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
451 case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
452 return mpeg_audio_dec_playback;
453 case V4L2_CID_MPEG_VIDEO_ENCODING:
454 return mpeg_video_encoding;
455 case V4L2_CID_MPEG_VIDEO_ASPECT:
456 return mpeg_video_aspect;
457 case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
458 return mpeg_video_bitrate_mode;
459 case V4L2_CID_MPEG_STREAM_TYPE:
460 return mpeg_stream_type;
461 case V4L2_CID_MPEG_STREAM_VBI_FMT:
462 return mpeg_stream_vbi_fmt;
463 case V4L2_CID_POWER_LINE_FREQUENCY:
464 return camera_power_line_frequency;
465 case V4L2_CID_EXPOSURE_AUTO:
466 return camera_exposure_auto;
467 case V4L2_CID_EXPOSURE_METERING:
468 return camera_exposure_metering;
469 case V4L2_CID_AUTO_FOCUS_RANGE:
470 return camera_auto_focus_range;
471 case V4L2_CID_COLORFX:
472 return colorfx;
473 case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
474 return auto_n_preset_white_balance;
475 case V4L2_CID_ISO_SENSITIVITY_AUTO:
476 return camera_iso_sensitivity_auto;
477 case V4L2_CID_SCENE_MODE:
478 return scene_mode;
479 case V4L2_CID_TUNE_PREEMPHASIS:
480 return tune_preemphasis;
481 case V4L2_CID_FLASH_LED_MODE:
482 return flash_led_mode;
483 case V4L2_CID_FLASH_STROBE_SOURCE:
484 return flash_strobe_source;
485 case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
486 return header_mode;
487 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
488 return multi_slice;
489 case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
490 return entropy_mode;
491 case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
492 return mpeg_h264_level;
493 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
494 return h264_loop_filter;
495 case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
496 return h264_profile;
497 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
498 return vui_sar_idc;
499 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
500 return mpeg_mpeg4_level;
501 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
502 return mpeg4_profile;
503 case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
504 return jpeg_chroma_subsampling;
505
506 default:
507 return NULL;
508 }
509}
510EXPORT_SYMBOL(v4l2_ctrl_get_menu);
511
/*
 * v4l2_ctrl_get_name() - Return the name for a given control ID.
 * @id: the control ID
 *
 * Returns the standard name of the control, or NULL when the ID is not
 * one of the standard controls listed here. The returned strings are
 * what VIDIOC_QUERYCTRL reports to userspace.
 */
const char *v4l2_ctrl_get_name(u32 id)
{
	switch (id) {
	/* USER controls */
	/* Keep the order of the 'case's the same as in videodev2.h! */
	case V4L2_CID_USER_CLASS:		return "User Controls";
	case V4L2_CID_BRIGHTNESS:		return "Brightness";
	case V4L2_CID_CONTRAST:			return "Contrast";
	case V4L2_CID_SATURATION:		return "Saturation";
	case V4L2_CID_HUE:			return "Hue";
	case V4L2_CID_AUDIO_VOLUME:		return "Volume";
	case V4L2_CID_AUDIO_BALANCE:		return "Balance";
	case V4L2_CID_AUDIO_BASS:		return "Bass";
	case V4L2_CID_AUDIO_TREBLE:		return "Treble";
	case V4L2_CID_AUDIO_MUTE:		return "Mute";
	case V4L2_CID_AUDIO_LOUDNESS:		return "Loudness";
	case V4L2_CID_BLACK_LEVEL:		return "Black Level";
	case V4L2_CID_AUTO_WHITE_BALANCE:	return "White Balance, Automatic";
	case V4L2_CID_DO_WHITE_BALANCE:		return "Do White Balance";
	case V4L2_CID_RED_BALANCE:		return "Red Balance";
	case V4L2_CID_BLUE_BALANCE:		return "Blue Balance";
	case V4L2_CID_GAMMA:			return "Gamma";
	case V4L2_CID_EXPOSURE:			return "Exposure";
	case V4L2_CID_AUTOGAIN:			return "Gain, Automatic";
	case V4L2_CID_GAIN:			return "Gain";
	case V4L2_CID_HFLIP:			return "Horizontal Flip";
	case V4L2_CID_VFLIP:			return "Vertical Flip";
	case V4L2_CID_HCENTER:			return "Horizontal Center";
	case V4L2_CID_VCENTER:			return "Vertical Center";
	case V4L2_CID_POWER_LINE_FREQUENCY:	return "Power Line Frequency";
	case V4L2_CID_HUE_AUTO:			return "Hue, Automatic";
	case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
	case V4L2_CID_SHARPNESS:		return "Sharpness";
	case V4L2_CID_BACKLIGHT_COMPENSATION:	return "Backlight Compensation";
	case V4L2_CID_CHROMA_AGC:		return "Chroma AGC";
	case V4L2_CID_COLOR_KILLER:		return "Color Killer";
	case V4L2_CID_COLORFX:			return "Color Effects";
	case V4L2_CID_AUTOBRIGHTNESS:		return "Brightness, Automatic";
	case V4L2_CID_BAND_STOP_FILTER:		return "Band-Stop Filter";
	case V4L2_CID_ROTATE:			return "Rotate";
	case V4L2_CID_BG_COLOR:			return "Background Color";
	case V4L2_CID_CHROMA_GAIN:		return "Chroma Gain";
	case V4L2_CID_ILLUMINATORS_1:		return "Illuminator 1";
	case V4L2_CID_ILLUMINATORS_2:		return "Illuminator 2";
	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:	return "Min Number of Capture Buffers";
	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:	return "Min Number of Output Buffers";
	case V4L2_CID_ALPHA_COMPONENT:		return "Alpha Component";
	case V4L2_CID_COLORFX_CBCR:		return "Color Effects, CbCr";

	/* MPEG controls */
	/* Keep the order of the 'case's the same as in videodev2.h! */
	case V4L2_CID_MPEG_CLASS:		return "MPEG Encoder Controls";
	case V4L2_CID_MPEG_STREAM_TYPE:		return "Stream Type";
	case V4L2_CID_MPEG_STREAM_PID_PMT:	return "Stream PMT Program ID";
	case V4L2_CID_MPEG_STREAM_PID_AUDIO:	return "Stream Audio Program ID";
	case V4L2_CID_MPEG_STREAM_PID_VIDEO:	return "Stream Video Program ID";
	case V4L2_CID_MPEG_STREAM_PID_PCR:	return "Stream PCR Program ID";
	case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID";
	case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
	case V4L2_CID_MPEG_STREAM_VBI_FMT:	return "Stream VBI Format";
	case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency";
	case V4L2_CID_MPEG_AUDIO_ENCODING:	return "Audio Encoding";
	case V4L2_CID_MPEG_AUDIO_L1_BITRATE:	return "Audio Layer I Bitrate";
	case V4L2_CID_MPEG_AUDIO_L2_BITRATE:	return "Audio Layer II Bitrate";
	case V4L2_CID_MPEG_AUDIO_L3_BITRATE:	return "Audio Layer III Bitrate";
	case V4L2_CID_MPEG_AUDIO_MODE:		return "Audio Stereo Mode";
	case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension";
	case V4L2_CID_MPEG_AUDIO_EMPHASIS:	return "Audio Emphasis";
	case V4L2_CID_MPEG_AUDIO_CRC:		return "Audio CRC";
	case V4L2_CID_MPEG_AUDIO_MUTE:		return "Audio Mute";
	case V4L2_CID_MPEG_AUDIO_AAC_BITRATE:	return "Audio AAC Bitrate";
	case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:	return "Audio AC-3 Bitrate";
	case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:	return "Audio Playback";
	case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback";
	case V4L2_CID_MPEG_VIDEO_ENCODING:	return "Video Encoding";
	case V4L2_CID_MPEG_VIDEO_ASPECT:	return "Video Aspect";
	case V4L2_CID_MPEG_VIDEO_B_FRAMES:	return "Video B Frames";
	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:	return "Video GOP Size";
	case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:	return "Video GOP Closure";
	case V4L2_CID_MPEG_VIDEO_PULLDOWN:	return "Video Pulldown";
	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:	return "Video Bitrate Mode";
	case V4L2_CID_MPEG_VIDEO_BITRATE:	return "Video Bitrate";
	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:	return "Video Peak Bitrate";
	case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
	case V4L2_CID_MPEG_VIDEO_MUTE:		return "Video Mute";
	case V4L2_CID_MPEG_VIDEO_MUTE_YUV:	return "Video Mute YUV";
	case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:	return "Decoder Slice Interface";
	case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:	return "MPEG4 Loop Filter Enable";
	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:	return "Number of Intra Refresh MBs";
	case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:		return "Frame Level Rate Control Enable";
	case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:			return "H264 MB Level Rate Control";
	case V4L2_CID_MPEG_VIDEO_HEADER_MODE:			return "Sequence Header Mode";
	case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC:			return "Max Number of Reference Pics";
	case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:		return "H263 I-Frame QP Value";
	case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:		return "H263 P-Frame QP Value";
	case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:		return "H263 B-Frame QP Value";
	case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:			return "H263 Minimum QP Value";
	case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:			return "H263 Maximum QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:		return "H264 I-Frame QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:		return "H264 P-Frame QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:		return "H264 B-Frame QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:			return "H264 Maximum QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:			return "H264 Minimum QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:		return "H264 8x8 Transform Enable";
	case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:			return "H264 CPB Buffer Size";
	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:		return "H264 Entropy Mode";
	case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:			return "H264 I-Frame Period";
	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:			return "H264 Level";
	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:	return "H264 Loop Filter Alpha Offset";
	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:		return "H264 Loop Filter Beta Offset";
	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:		return "H264 Loop Filter Mode";
	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:			return "H264 Profile";
	case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT:	return "Vertical Size of SAR";
	case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH:	return "Horizontal Size of SAR";
	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:		return "Aspect Ratio VUI Enable";
	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:		return "VUI Aspect Ratio IDC";
	case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:		return "MPEG4 I-Frame QP Value";
	case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:		return "MPEG4 P-Frame QP Value";
	case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:		return "MPEG4 B-Frame QP Value";
	case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP:			return "MPEG4 Minimum QP Value";
	case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP:			return "MPEG4 Maximum QP Value";
	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:			return "MPEG4 Level";
	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:			return "MPEG4 Profile";
	case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:			return "Quarter Pixel Search Enable";
	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:		return "Maximum Bytes in a Slice";
	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:		return "Number of MBs in a Slice";
	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:		return "Slice Partitioning Method";
	case V4L2_CID_MPEG_VIDEO_VBV_SIZE:			return "VBV Buffer Size";
	case V4L2_CID_MPEG_VIDEO_DEC_PTS:			return "Video Decoder PTS";
	case V4L2_CID_MPEG_VIDEO_DEC_FRAME:			return "Video Decoder Frame Count";

	/* CAMERA controls */
	/* Keep the order of the 'case's the same as in videodev2.h! */
	case V4L2_CID_CAMERA_CLASS:		return "Camera Controls";
	case V4L2_CID_EXPOSURE_AUTO:		return "Auto Exposure";
	case V4L2_CID_EXPOSURE_ABSOLUTE:	return "Exposure Time, Absolute";
	case V4L2_CID_EXPOSURE_AUTO_PRIORITY:	return "Exposure, Dynamic Framerate";
	case V4L2_CID_PAN_RELATIVE:		return "Pan, Relative";
	case V4L2_CID_TILT_RELATIVE:		return "Tilt, Relative";
	case V4L2_CID_PAN_RESET:		return "Pan, Reset";
	case V4L2_CID_TILT_RESET:		return "Tilt, Reset";
	case V4L2_CID_PAN_ABSOLUTE:		return "Pan, Absolute";
	case V4L2_CID_TILT_ABSOLUTE:		return "Tilt, Absolute";
	case V4L2_CID_FOCUS_ABSOLUTE:		return "Focus, Absolute";
	case V4L2_CID_FOCUS_RELATIVE:		return "Focus, Relative";
	case V4L2_CID_FOCUS_AUTO:		return "Focus, Automatic Continuous";
	case V4L2_CID_ZOOM_ABSOLUTE:		return "Zoom, Absolute";
	case V4L2_CID_ZOOM_RELATIVE:		return "Zoom, Relative";
	case V4L2_CID_ZOOM_CONTINUOUS:		return "Zoom, Continuous";
	case V4L2_CID_PRIVACY:			return "Privacy";
	case V4L2_CID_IRIS_ABSOLUTE:		return "Iris, Absolute";
	case V4L2_CID_IRIS_RELATIVE:		return "Iris, Relative";
	case V4L2_CID_AUTO_EXPOSURE_BIAS:	return "Auto Exposure, Bias";
	case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset";
	case V4L2_CID_WIDE_DYNAMIC_RANGE:	return "Wide Dynamic Range";
	case V4L2_CID_IMAGE_STABILIZATION:	return "Image Stabilization";
	case V4L2_CID_ISO_SENSITIVITY:		return "ISO Sensitivity";
	case V4L2_CID_ISO_SENSITIVITY_AUTO:	return "ISO Sensitivity, Auto";
	case V4L2_CID_EXPOSURE_METERING:	return "Exposure, Metering Mode";
	case V4L2_CID_SCENE_MODE:		return "Scene Mode";
	case V4L2_CID_3A_LOCK:			return "3A Lock";
	case V4L2_CID_AUTO_FOCUS_START:		return "Auto Focus, Start";
	case V4L2_CID_AUTO_FOCUS_STOP:		return "Auto Focus, Stop";
	case V4L2_CID_AUTO_FOCUS_STATUS:	return "Auto Focus, Status";
	case V4L2_CID_AUTO_FOCUS_RANGE:		return "Auto Focus, Range";

	/* FM Radio Modulator control */
	/* Keep the order of the 'case's the same as in videodev2.h! */
	case V4L2_CID_FM_TX_CLASS:		return "FM Radio Modulator Controls";
	case V4L2_CID_RDS_TX_DEVIATION:		return "RDS Signal Deviation";
	case V4L2_CID_RDS_TX_PI:		return "RDS Program ID";
	case V4L2_CID_RDS_TX_PTY:		return "RDS Program Type";
	case V4L2_CID_RDS_TX_PS_NAME:		return "RDS PS Name";
	case V4L2_CID_RDS_TX_RADIO_TEXT:	return "RDS Radio Text";
	case V4L2_CID_AUDIO_LIMITER_ENABLED:	return "Audio Limiter Feature Enabled";
	case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
	case V4L2_CID_AUDIO_LIMITER_DEVIATION:	return "Audio Limiter Deviation";
	case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
	case V4L2_CID_AUDIO_COMPRESSION_GAIN:	return "Audio Compression Gain";
	case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
	case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
	case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time";
	case V4L2_CID_PILOT_TONE_ENABLED:	return "Pilot Tone Feature Enabled";
	case V4L2_CID_PILOT_TONE_DEVIATION:	return "Pilot Tone Deviation";
	case V4L2_CID_PILOT_TONE_FREQUENCY:	return "Pilot Tone Frequency";
	case V4L2_CID_TUNE_PREEMPHASIS:		return "Pre-Emphasis";
	case V4L2_CID_TUNE_POWER_LEVEL:		return "Tune Power Level";
	case V4L2_CID_TUNE_ANTENNA_CAPACITOR:	return "Tune Antenna Capacitor";

	/* Flash controls */
	case V4L2_CID_FLASH_CLASS:		return "Flash Controls";
	case V4L2_CID_FLASH_LED_MODE:		return "LED Mode";
	case V4L2_CID_FLASH_STROBE_SOURCE:	return "Strobe Source";
	case V4L2_CID_FLASH_STROBE:		return "Strobe";
	case V4L2_CID_FLASH_STROBE_STOP:	return "Stop Strobe";
	case V4L2_CID_FLASH_STROBE_STATUS:	return "Strobe Status";
	case V4L2_CID_FLASH_TIMEOUT:		return "Strobe Timeout";
	case V4L2_CID_FLASH_INTENSITY:		return "Intensity, Flash Mode";
	case V4L2_CID_FLASH_TORCH_INTENSITY:	return "Intensity, Torch Mode";
	case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
	case V4L2_CID_FLASH_FAULT:		return "Faults";
	case V4L2_CID_FLASH_CHARGE:		return "Charge";
	case V4L2_CID_FLASH_READY:		return "Ready to Strobe";

	/* JPEG encoder controls */
	/* Keep the order of the 'case's the same as in videodev2.h! */
	case V4L2_CID_JPEG_CLASS:		return "JPEG Compression Controls";
	case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:	return "Chroma Subsampling";
	case V4L2_CID_JPEG_RESTART_INTERVAL:	return "Restart Interval";
	case V4L2_CID_JPEG_COMPRESSION_QUALITY:	return "Compression Quality";
	case V4L2_CID_JPEG_ACTIVE_MARKER:	return "Active Markers";

	/* Image source controls */
	case V4L2_CID_IMAGE_SOURCE_CLASS:	return "Image Source Controls";
	case V4L2_CID_VBLANK:			return "Vertical Blanking";
	case V4L2_CID_HBLANK:			return "Horizontal Blanking";
	case V4L2_CID_ANALOGUE_GAIN:		return "Analogue Gain";

	/* Image processing controls */
	case V4L2_CID_IMAGE_PROC_CLASS:		return "Image Processing Controls";
	case V4L2_CID_LINK_FREQ:		return "Link Frequency";
	case V4L2_CID_PIXEL_RATE:		return "Pixel Rate";

	default:
		return NULL;
	}
}
EXPORT_SYMBOL(v4l2_ctrl_get_name);
741
742void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
743 s32 *min, s32 *max, s32 *step, s32 *def, u32 *flags)
744{
745 *name = v4l2_ctrl_get_name(id);
746 *flags = 0;
747
748 switch (id) {
749 case V4L2_CID_AUDIO_MUTE:
750 case V4L2_CID_AUDIO_LOUDNESS:
751 case V4L2_CID_AUTO_WHITE_BALANCE:
752 case V4L2_CID_AUTOGAIN:
753 case V4L2_CID_HFLIP:
754 case V4L2_CID_VFLIP:
755 case V4L2_CID_HUE_AUTO:
756 case V4L2_CID_CHROMA_AGC:
757 case V4L2_CID_COLOR_KILLER:
758 case V4L2_CID_AUTOBRIGHTNESS:
759 case V4L2_CID_MPEG_AUDIO_MUTE:
760 case V4L2_CID_MPEG_VIDEO_MUTE:
761 case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
762 case V4L2_CID_MPEG_VIDEO_PULLDOWN:
763 case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
764 case V4L2_CID_FOCUS_AUTO:
765 case V4L2_CID_PRIVACY:
766 case V4L2_CID_AUDIO_LIMITER_ENABLED:
767 case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
768 case V4L2_CID_PILOT_TONE_ENABLED:
769 case V4L2_CID_ILLUMINATORS_1:
770 case V4L2_CID_ILLUMINATORS_2:
771 case V4L2_CID_FLASH_STROBE_STATUS:
772 case V4L2_CID_FLASH_CHARGE:
773 case V4L2_CID_FLASH_READY:
774 case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
775 case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
776 case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
777 case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
778 case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
779 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
780 case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
781 case V4L2_CID_WIDE_DYNAMIC_RANGE:
782 case V4L2_CID_IMAGE_STABILIZATION:
783 *type = V4L2_CTRL_TYPE_BOOLEAN;
784 *min = 0;
785 *max = *step = 1;
786 break;
787 case V4L2_CID_PAN_RESET:
788 case V4L2_CID_TILT_RESET:
789 case V4L2_CID_FLASH_STROBE:
790 case V4L2_CID_FLASH_STROBE_STOP:
791 case V4L2_CID_AUTO_FOCUS_START:
792 case V4L2_CID_AUTO_FOCUS_STOP:
793 *type = V4L2_CTRL_TYPE_BUTTON;
794 *flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
795 *min = *max = *step = *def = 0;
796 break;
797 case V4L2_CID_POWER_LINE_FREQUENCY:
798 case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
799 case V4L2_CID_MPEG_AUDIO_ENCODING:
800 case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
801 case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
802 case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
803 case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
804 case V4L2_CID_MPEG_AUDIO_MODE:
805 case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
806 case V4L2_CID_MPEG_AUDIO_EMPHASIS:
807 case V4L2_CID_MPEG_AUDIO_CRC:
808 case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
809 case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
810 case V4L2_CID_MPEG_VIDEO_ENCODING:
811 case V4L2_CID_MPEG_VIDEO_ASPECT:
812 case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
813 case V4L2_CID_MPEG_STREAM_TYPE:
814 case V4L2_CID_MPEG_STREAM_VBI_FMT:
815 case V4L2_CID_EXPOSURE_AUTO:
816 case V4L2_CID_AUTO_FOCUS_RANGE:
817 case V4L2_CID_COLORFX:
818 case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
819 case V4L2_CID_TUNE_PREEMPHASIS:
820 case V4L2_CID_FLASH_LED_MODE:
821 case V4L2_CID_FLASH_STROBE_SOURCE:
822 case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
823 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
824 case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
825 case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
826 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
827 case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
828 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
829 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
830 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
831 case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
832 case V4L2_CID_ISO_SENSITIVITY_AUTO:
833 case V4L2_CID_EXPOSURE_METERING:
834 case V4L2_CID_SCENE_MODE:
835 *type = V4L2_CTRL_TYPE_MENU;
836 break;
837 case V4L2_CID_LINK_FREQ:
838 *type = V4L2_CTRL_TYPE_INTEGER_MENU;
839 break;
840 case V4L2_CID_RDS_TX_PS_NAME:
841 case V4L2_CID_RDS_TX_RADIO_TEXT:
842 *type = V4L2_CTRL_TYPE_STRING;
843 break;
844 case V4L2_CID_ISO_SENSITIVITY:
845 case V4L2_CID_AUTO_EXPOSURE_BIAS:
846 *type = V4L2_CTRL_TYPE_INTEGER_MENU;
847 break;
848 case V4L2_CID_USER_CLASS:
849 case V4L2_CID_CAMERA_CLASS:
850 case V4L2_CID_MPEG_CLASS:
851 case V4L2_CID_FM_TX_CLASS:
852 case V4L2_CID_FLASH_CLASS:
853 case V4L2_CID_JPEG_CLASS:
854 case V4L2_CID_IMAGE_SOURCE_CLASS:
855 case V4L2_CID_IMAGE_PROC_CLASS:
856 *type = V4L2_CTRL_TYPE_CTRL_CLASS;
857 /* You can neither read not write these */
858 *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
859 *min = *max = *step = *def = 0;
860 break;
861 case V4L2_CID_BG_COLOR:
862 *type = V4L2_CTRL_TYPE_INTEGER;
863 *step = 1;
864 *min = 0;
865 /* Max is calculated as RGB888 that is 2^24 */
866 *max = 0xFFFFFF;
867 break;
868 case V4L2_CID_FLASH_FAULT:
869 case V4L2_CID_JPEG_ACTIVE_MARKER:
870 case V4L2_CID_3A_LOCK:
871 case V4L2_CID_AUTO_FOCUS_STATUS:
872 *type = V4L2_CTRL_TYPE_BITMASK;
873 break;
874 case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
875 case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
876 *type = V4L2_CTRL_TYPE_INTEGER;
877 *flags |= V4L2_CTRL_FLAG_READ_ONLY;
878 break;
879 case V4L2_CID_MPEG_VIDEO_DEC_FRAME:
880 case V4L2_CID_MPEG_VIDEO_DEC_PTS:
881 *flags |= V4L2_CTRL_FLAG_VOLATILE;
882 /* Fall through */
883 case V4L2_CID_PIXEL_RATE:
884 *type = V4L2_CTRL_TYPE_INTEGER64;
885 *flags |= V4L2_CTRL_FLAG_READ_ONLY;
886 *min = *max = *step = *def = 0;
887 break;
888 default:
889 *type = V4L2_CTRL_TYPE_INTEGER;
890 break;
891 }
892 switch (id) {
893 case V4L2_CID_MPEG_AUDIO_ENCODING:
894 case V4L2_CID_MPEG_AUDIO_MODE:
895 case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
896 case V4L2_CID_MPEG_VIDEO_B_FRAMES:
897 case V4L2_CID_MPEG_STREAM_TYPE:
898 *flags |= V4L2_CTRL_FLAG_UPDATE;
899 break;
900 case V4L2_CID_AUDIO_VOLUME:
901 case V4L2_CID_AUDIO_BALANCE:
902 case V4L2_CID_AUDIO_BASS:
903 case V4L2_CID_AUDIO_TREBLE:
904 case V4L2_CID_BRIGHTNESS:
905 case V4L2_CID_CONTRAST:
906 case V4L2_CID_SATURATION:
907 case V4L2_CID_HUE:
908 case V4L2_CID_RED_BALANCE:
909 case V4L2_CID_BLUE_BALANCE:
910 case V4L2_CID_GAMMA:
911 case V4L2_CID_SHARPNESS:
912 case V4L2_CID_CHROMA_GAIN:
913 case V4L2_CID_RDS_TX_DEVIATION:
914 case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
915 case V4L2_CID_AUDIO_LIMITER_DEVIATION:
916 case V4L2_CID_AUDIO_COMPRESSION_GAIN:
917 case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
918 case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
919 case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
920 case V4L2_CID_PILOT_TONE_DEVIATION:
921 case V4L2_CID_PILOT_TONE_FREQUENCY:
922 case V4L2_CID_TUNE_POWER_LEVEL:
923 case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
924 *flags |= V4L2_CTRL_FLAG_SLIDER;
925 break;
926 case V4L2_CID_PAN_RELATIVE:
927 case V4L2_CID_TILT_RELATIVE:
928 case V4L2_CID_FOCUS_RELATIVE:
929 case V4L2_CID_IRIS_RELATIVE:
930 case V4L2_CID_ZOOM_RELATIVE:
931 *flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
932 break;
933 case V4L2_CID_FLASH_STROBE_STATUS:
934 case V4L2_CID_AUTO_FOCUS_STATUS:
935 case V4L2_CID_FLASH_READY:
936 *flags |= V4L2_CTRL_FLAG_READ_ONLY;
937 break;
938 }
939}
940EXPORT_SYMBOL(v4l2_ctrl_fill);
941
942/* Helper function to determine whether the control type is compatible with
943 VIDIOC_G/S_CTRL. */
944static bool type_is_int(const struct v4l2_ctrl *ctrl)
945{
946 switch (ctrl->type) {
947 case V4L2_CTRL_TYPE_INTEGER64:
948 case V4L2_CTRL_TYPE_STRING:
949 /* Nope, these need v4l2_ext_control */
950 return false;
951 default:
952 return true;
953 }
954}
955
956static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
957{
958 memset(ev->reserved, 0, sizeof(ev->reserved));
959 ev->type = V4L2_EVENT_CTRL;
960 ev->id = ctrl->id;
961 ev->u.ctrl.changes = changes;
962 ev->u.ctrl.type = ctrl->type;
963 ev->u.ctrl.flags = ctrl->flags;
964 if (ctrl->type == V4L2_CTRL_TYPE_STRING)
965 ev->u.ctrl.value64 = 0;
966 else
967 ev->u.ctrl.value64 = ctrl->cur.val64;
968 ev->u.ctrl.minimum = ctrl->minimum;
969 ev->u.ctrl.maximum = ctrl->maximum;
970 if (ctrl->type == V4L2_CTRL_TYPE_MENU
971 || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
972 ev->u.ctrl.step = 1;
973 else
974 ev->u.ctrl.step = ctrl->step;
975 ev->u.ctrl.default_value = ctrl->default_value;
976}
977
/* Queue a control-change event to every subscriber of @ctrl.
 * The event is not sent back to the file handle @fh that caused the change
 * unless that subscriber opted in with V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK. */
static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
{
	struct v4l2_event ev;
	struct v4l2_subscribed_event *sev;

	/* Nothing to do if nobody subscribed to this control */
	if (list_empty(&ctrl->ev_subs))
		return;
	fill_event(&ev, ctrl, changes);

	list_for_each_entry(sev, &ctrl->ev_subs, node)
		if (sev->fh != fh ||
		    (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
			v4l2_event_queue_fh(sev->fh, &ev);
}
992
/* Helper function: copy the current control value back to the caller.
 * Returns -ENOSPC if the userspace string buffer is too small (and sets
 * c->size to the required size), -EFAULT on a failed copy_to_user(),
 * 0 otherwise. */
static int cur_to_user(struct v4l2_ext_control *c,
		       struct v4l2_ctrl *ctrl)
{
	u32 len;

	switch (ctrl->type) {
	case V4L2_CTRL_TYPE_STRING:
		len = strlen(ctrl->cur.string);
		if (c->size < len + 1) {
			/* Tell the caller how much room is needed */
			c->size = len + 1;
			return -ENOSPC;
		}
		/* Include the terminating 0 in the copy */
		return copy_to_user(c->string, ctrl->cur.string,
				    len + 1) ? -EFAULT : 0;
	case V4L2_CTRL_TYPE_INTEGER64:
		c->value64 = ctrl->cur.val64;
		break;
	default:
		c->value = ctrl->cur.val;
		break;
	}
	return 0;
}
1017
/* Helper function: copy the caller-provided value as the new control value.
 * Marks the control as having a new value. For strings the value is copied
 * from userspace, truncated to the control's maximum length and checked for
 * proper 0-termination. Returns -ERANGE for a bad size or over-long string
 * and -EFAULT on a failed copy_from_user(). */
static int user_to_new(struct v4l2_ext_control *c,
		       struct v4l2_ctrl *ctrl)
{
	int ret;
	u32 size;

	ctrl->is_new = 1;
	switch (ctrl->type) {
	case V4L2_CTRL_TYPE_INTEGER64:
		ctrl->val64 = c->value64;
		break;
	case V4L2_CTRL_TYPE_STRING:
		size = c->size;
		if (size == 0)
			return -ERANGE;
		/* Never copy more than maximum chars + terminating 0 */
		if (size > ctrl->maximum + 1)
			size = ctrl->maximum + 1;
		ret = copy_from_user(ctrl->string, c->string, size);
		if (!ret) {
			char last = ctrl->string[size - 1];

			/* Force 0-termination of the copied string */
			ctrl->string[size - 1] = 0;
			/* If the string was longer than ctrl->maximum,
			   then return an error. */
			if (strlen(ctrl->string) == ctrl->maximum && last)
				return -ERANGE;
		}
		return ret ? -EFAULT : 0;
	default:
		ctrl->val = c->value;
		break;
	}
	return 0;
}
1053
/* Helper function: copy the new control value back to the caller.
 * Same contract as cur_to_user(), but operates on the not-yet-applied
 * 'new' value of the control. */
static int new_to_user(struct v4l2_ext_control *c,
		       struct v4l2_ctrl *ctrl)
{
	u32 len;

	switch (ctrl->type) {
	case V4L2_CTRL_TYPE_STRING:
		len = strlen(ctrl->string);
		if (c->size < len + 1) {
			/* NOTE(review): reports maximum + 1 as the required
			   size, while cur_to_user() reports len + 1 —
			   confirm whether this asymmetry is intentional. */
			c->size = ctrl->maximum + 1;
			return -ENOSPC;
		}
		/* Include the terminating 0 in the copy */
		return copy_to_user(c->string, ctrl->string,
				    len + 1) ? -EFAULT : 0;
	case V4L2_CTRL_TYPE_INTEGER64:
		c->value64 = ctrl->val64;
		break;
	default:
		c->value = ctrl->val;
		break;
	}
	return 0;
}
1078
/* Copy the new value to the current value.
 * Also updates the INACTIVE/VOLATILE flags for auto clusters and sends a
 * control event if the value and/or the flags changed. */
static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
		       bool update_inactive)
{
	bool changed = false;

	if (ctrl == NULL)
		return;
	switch (ctrl->type) {
	case V4L2_CTRL_TYPE_BUTTON:
		/* Pressing a button always counts as a change */
		changed = true;
		break;
	case V4L2_CTRL_TYPE_STRING:
		/* strings are always 0-terminated */
		changed = strcmp(ctrl->string, ctrl->cur.string);
		strcpy(ctrl->cur.string, ctrl->string);
		break;
	case V4L2_CTRL_TYPE_INTEGER64:
		changed = ctrl->val64 != ctrl->cur.val64;
		ctrl->cur.val64 = ctrl->val64;
		break;
	default:
		changed = ctrl->val != ctrl->cur.val;
		ctrl->cur.val = ctrl->val;
		break;
	}
	if (update_inactive) {
		/* Note: update_inactive can only be true for auto clusters. */
		ctrl->flags &=
			~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
		/* In auto mode the cluster members become inactive (and
		   volatile if the cluster has volatile members). */
		if (!is_cur_manual(ctrl->cluster[0])) {
			ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
			if (ctrl->cluster[0]->has_volatiles)
				ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
		}
		/* Flag changes are relevant to all filehandles */
		fh = NULL;
	}
	if (changed || update_inactive) {
		/* If a control was changed that was not one of the controls
		   modified by the application, then send the event to all. */
		if (!ctrl->is_new)
			fh = NULL;
		send_event(fh, ctrl,
			(changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) |
			(update_inactive ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
	}
}
1126
1127/* Copy the current value to the new value */
1128static void cur_to_new(struct v4l2_ctrl *ctrl)
1129{
1130 if (ctrl == NULL)
1131 return;
1132 switch (ctrl->type) {
1133 case V4L2_CTRL_TYPE_STRING:
1134 /* strings are always 0-terminated */
1135 strcpy(ctrl->string, ctrl->cur.string);
1136 break;
1137 case V4L2_CTRL_TYPE_INTEGER64:
1138 ctrl->val64 = ctrl->cur.val64;
1139 break;
1140 default:
1141 ctrl->val = ctrl->cur.val;
1142 break;
1143 }
1144}
1145
1146/* Return non-zero if one or more of the controls in the cluster has a new
1147 value that differs from the current value. */
1148static int cluster_changed(struct v4l2_ctrl *master)
1149{
1150 int diff = 0;
1151 int i;
1152
1153 for (i = 0; !diff && i < master->ncontrols; i++) {
1154 struct v4l2_ctrl *ctrl = master->cluster[i];
1155
1156 if (ctrl == NULL)
1157 continue;
1158 switch (ctrl->type) {
1159 case V4L2_CTRL_TYPE_BUTTON:
1160 /* Button controls are always 'different' */
1161 return 1;
1162 case V4L2_CTRL_TYPE_STRING:
1163 /* strings are always 0-terminated */
1164 diff = strcmp(ctrl->string, ctrl->cur.string);
1165 break;
1166 case V4L2_CTRL_TYPE_INTEGER64:
1167 diff = ctrl->val64 != ctrl->cur.val64;
1168 break;
1169 default:
1170 diff = ctrl->val != ctrl->cur.val;
1171 break;
1172 }
1173 }
1174 return diff;
1175}
1176
1177/* Validate integer-type control */
1178static int validate_new_int(const struct v4l2_ctrl *ctrl, s32 *pval)
1179{
1180 s32 val = *pval;
1181 u32 offset;
1182
1183 switch (ctrl->type) {
1184 case V4L2_CTRL_TYPE_INTEGER:
1185 /* Round towards the closest legal value */
1186 val += ctrl->step / 2;
1187 if (val < ctrl->minimum)
1188 val = ctrl->minimum;
1189 if (val > ctrl->maximum)
1190 val = ctrl->maximum;
1191 offset = val - ctrl->minimum;
1192 offset = ctrl->step * (offset / ctrl->step);
1193 val = ctrl->minimum + offset;
1194 *pval = val;
1195 return 0;
1196
1197 case V4L2_CTRL_TYPE_BOOLEAN:
1198 *pval = !!val;
1199 return 0;
1200
1201 case V4L2_CTRL_TYPE_MENU:
1202 case V4L2_CTRL_TYPE_INTEGER_MENU:
1203 if (val < ctrl->minimum || val > ctrl->maximum)
1204 return -ERANGE;
1205 if (ctrl->menu_skip_mask & (1 << val))
1206 return -EINVAL;
1207 if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
1208 ctrl->qmenu[val][0] == '\0')
1209 return -EINVAL;
1210 return 0;
1211
1212 case V4L2_CTRL_TYPE_BITMASK:
1213 *pval &= ctrl->maximum;
1214 return 0;
1215
1216 case V4L2_CTRL_TYPE_BUTTON:
1217 case V4L2_CTRL_TYPE_CTRL_CLASS:
1218 *pval = 0;
1219 return 0;
1220
1221 default:
1222 return -EINVAL;
1223 }
1224}
1225
1226/* Validate a new control */
1227static int validate_new(const struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
1228{
1229 char *s = c->string;
1230 size_t len;
1231
1232 switch (ctrl->type) {
1233 case V4L2_CTRL_TYPE_INTEGER:
1234 case V4L2_CTRL_TYPE_BOOLEAN:
1235 case V4L2_CTRL_TYPE_MENU:
1236 case V4L2_CTRL_TYPE_INTEGER_MENU:
1237 case V4L2_CTRL_TYPE_BITMASK:
1238 case V4L2_CTRL_TYPE_BUTTON:
1239 case V4L2_CTRL_TYPE_CTRL_CLASS:
1240 return validate_new_int(ctrl, &c->value);
1241
1242 case V4L2_CTRL_TYPE_INTEGER64:
1243 return 0;
1244
1245 case V4L2_CTRL_TYPE_STRING:
1246 len = strlen(s);
1247 if (len < ctrl->minimum)
1248 return -ERANGE;
1249 if ((len - ctrl->minimum) % ctrl->step)
1250 return -ERANGE;
1251 return 0;
1252
1253 default:
1254 return -EINVAL;
1255 }
1256}
1257
1258static inline u32 node2id(struct list_head *node)
1259{
1260 return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id;
1261}
1262
1263/* Set the handler's error code if it wasn't set earlier already */
1264static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
1265{
1266 if (hdl->error == 0)
1267 hdl->error = err;
1268 return err;
1269}
1270
1271/* Initialize the handler */
1272int v4l2_ctrl_handler_init(struct v4l2_ctrl_handler *hdl,
1273 unsigned nr_of_controls_hint)
1274{
1275 hdl->lock = &hdl->_lock;
1276 mutex_init(hdl->lock);
1277 INIT_LIST_HEAD(&hdl->ctrls);
1278 INIT_LIST_HEAD(&hdl->ctrl_refs);
1279 hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
1280 hdl->buckets = kcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]),
1281 GFP_KERNEL);
1282 hdl->error = hdl->buckets ? 0 : -ENOMEM;
1283 return hdl->error;
1284}
1285EXPORT_SYMBOL(v4l2_ctrl_handler_init);
1286
/* Free all controls and control refs.
 * Safe to call with a NULL or never-initialized handler; after this the
 * handler can be re-initialized with v4l2_ctrl_handler_init(). */
void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
{
	struct v4l2_ctrl_ref *ref, *next_ref;
	struct v4l2_ctrl *ctrl, *next_ctrl;
	struct v4l2_subscribed_event *sev, *next_sev;

	/* buckets == NULL means init failed or the handler was already freed */
	if (hdl == NULL || hdl->buckets == NULL)
		return;

	mutex_lock(hdl->lock);
	/* Free all nodes */
	list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
		list_del(&ref->node);
		kfree(ref);
	}
	/* Free all controls owned by the handler */
	list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
		list_del(&ctrl->node);
		/* Unhook any remaining event subscriptions */
		list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
			list_del(&sev->node);
		kfree(ctrl);
	}
	kfree(hdl->buckets);
	hdl->buckets = NULL;
	hdl->cached = NULL;
	hdl->error = 0;
	mutex_unlock(hdl->lock);
}
EXPORT_SYMBOL(v4l2_ctrl_handler_free);
1317
1318/* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer
1319 be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing
1320 with applications that do not use the NEXT_CTRL flag.
1321
1322 We just find the n-th private user control. It's O(N), but that should not
1323 be an issue in this particular case. */
1324static struct v4l2_ctrl_ref *find_private_ref(
1325 struct v4l2_ctrl_handler *hdl, u32 id)
1326{
1327 struct v4l2_ctrl_ref *ref;
1328
1329 id -= V4L2_CID_PRIVATE_BASE;
1330 list_for_each_entry(ref, &hdl->ctrl_refs, node) {
1331 /* Search for private user controls that are compatible with
1332 VIDIOC_G/S_CTRL. */
1333 if (V4L2_CTRL_ID2CLASS(ref->ctrl->id) == V4L2_CTRL_CLASS_USER &&
1334 V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) {
1335 if (!type_is_int(ref->ctrl))
1336 continue;
1337 if (id == 0)
1338 return ref;
1339 id--;
1340 }
1341 }
1342 return NULL;
1343}
1344
1345/* Find a control with the given ID. */
1346static struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id)
1347{
1348 struct v4l2_ctrl_ref *ref;
1349 int bucket;
1350
1351 id &= V4L2_CTRL_ID_MASK;
1352
1353 /* Old-style private controls need special handling */
1354 if (id >= V4L2_CID_PRIVATE_BASE)
1355 return find_private_ref(hdl, id);
1356 bucket = id % hdl->nr_of_buckets;
1357
1358 /* Simple optimization: cache the last control found */
1359 if (hdl->cached && hdl->cached->ctrl->id == id)
1360 return hdl->cached;
1361
1362 /* Not in cache, search the hash */
1363 ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
1364 while (ref && ref->ctrl->id != id)
1365 ref = ref->next;
1366
1367 if (ref)
1368 hdl->cached = ref; /* cache it! */
1369 return ref;
1370}
1371
1372/* Find a control with the given ID. Take the handler's lock first. */
1373static struct v4l2_ctrl_ref *find_ref_lock(
1374 struct v4l2_ctrl_handler *hdl, u32 id)
1375{
1376 struct v4l2_ctrl_ref *ref = NULL;
1377
1378 if (hdl) {
1379 mutex_lock(hdl->lock);
1380 ref = find_ref(hdl, id);
1381 mutex_unlock(hdl->lock);
1382 }
1383 return ref;
1384}
1385
1386/* Find a control with the given ID. */
1387struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
1388{
1389 struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
1390
1391 return ref ? ref->ctrl : NULL;
1392}
1393EXPORT_SYMBOL(v4l2_ctrl_find);
1394
/* Allocate a new v4l2_ctrl_ref and hook it into the handler.
 * The ref is inserted into the ID-sorted ctrl_refs list and into the hash
 * used for fast lookups. The control class control for the control's class
 * is added automatically if not yet present. Duplicate IDs are silently
 * ignored. Returns 0 or a negative error (also recorded in hdl->error). */
static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
			   struct v4l2_ctrl *ctrl)
{
	struct v4l2_ctrl_ref *ref;
	struct v4l2_ctrl_ref *new_ref;
	u32 id = ctrl->id;
	/* ID of the control class control belonging to this control's class */
	u32 class_ctrl = V4L2_CTRL_ID2CLASS(id) | 1;
	int bucket = id % hdl->nr_of_buckets; /* which bucket to use */

	/* Automatically add the control class if it is not yet present. */
	if (id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL)
		if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0))
			return hdl->error;

	if (hdl->error)
		return hdl->error;

	new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
	if (!new_ref)
		return handler_set_err(hdl, -ENOMEM);
	new_ref->ctrl = ctrl;
	if (ctrl->handler == hdl) {
		/* By default each control starts in a cluster of its own.
		   new_ref->ctrl is basically a cluster array with one
		   element, so that's perfect to use as the cluster pointer.
		   But only do this for the handler that owns the control. */
		ctrl->cluster = &new_ref->ctrl;
		ctrl->ncontrols = 1;
	}

	INIT_LIST_HEAD(&new_ref->node);

	mutex_lock(hdl->lock);

	/* Add immediately at the end of the list if the list is empty, or if
	   the last element in the list has a lower ID.
	   This ensures that when elements are added in ascending order the
	   insertion is an O(1) operation. */
	if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) {
		list_add_tail(&new_ref->node, &hdl->ctrl_refs);
		goto insert_in_hash;
	}

	/* Find insert position in sorted list */
	list_for_each_entry(ref, &hdl->ctrl_refs, node) {
		if (ref->ctrl->id < id)
			continue;
		/* Don't add duplicates */
		if (ref->ctrl->id == id) {
			kfree(new_ref);
			goto unlock;
		}
		list_add(&new_ref->node, ref->node.prev);
		break;
	}

insert_in_hash:
	/* Insert the control node in the hash */
	new_ref->next = hdl->buckets[bucket];
	hdl->buckets[bucket] = new_ref;

unlock:
	mutex_unlock(hdl->lock);
	return 0;
}
1461
/* Add a new control to the handler.
 * Validates the type/range arguments, allocates the control (including
 * space for the 'current' and 'new' string buffers for string controls)
 * and registers it with the handler. On failure NULL is returned and
 * hdl->error is set. The control is owned by the handler and is freed by
 * v4l2_ctrl_handler_free(). */
static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
			const struct v4l2_ctrl_ops *ops,
			u32 id, const char *name, enum v4l2_ctrl_type type,
			s32 min, s32 max, u32 step, s32 def,
			u32 flags, const char * const *qmenu,
			const s64 *qmenu_int, void *priv)
{
	struct v4l2_ctrl *ctrl;
	unsigned sz_extra = 0;

	if (hdl->error)
		return NULL;

	/* Sanity checks */
	if (id == 0 || name == NULL || id >= V4L2_CID_PRIVATE_BASE ||
	    (type == V4L2_CTRL_TYPE_INTEGER && step == 0) ||
	    (type == V4L2_CTRL_TYPE_BITMASK && max == 0) ||
	    (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
	    (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL) ||
	    (type == V4L2_CTRL_TYPE_STRING && max == 0)) {
		handler_set_err(hdl, -ERANGE);
		return NULL;
	}
	if (type != V4L2_CTRL_TYPE_BITMASK && max < min) {
		handler_set_err(hdl, -ERANGE);
		return NULL;
	}
	if ((type == V4L2_CTRL_TYPE_INTEGER ||
	     type == V4L2_CTRL_TYPE_MENU ||
	     type == V4L2_CTRL_TYPE_INTEGER_MENU ||
	     type == V4L2_CTRL_TYPE_BOOLEAN) &&
	    (def < min || def > max)) {
		handler_set_err(hdl, -ERANGE);
		return NULL;
	}
	/* For bitmasks the default must fit in the mask; min/step are unused */
	if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
		handler_set_err(hdl, -ERANGE);
		return NULL;
	}

	if (type == V4L2_CTRL_TYPE_BUTTON)
		flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
	else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
		flags |= V4L2_CTRL_FLAG_READ_ONLY;
	else if (type == V4L2_CTRL_TYPE_STRING)
		/* Room for both the 'current' and the 'new' string value */
		sz_extra += 2 * (max + 1);

	ctrl = kzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
	if (ctrl == NULL) {
		handler_set_err(hdl, -ENOMEM);
		return NULL;
	}

	INIT_LIST_HEAD(&ctrl->node);
	INIT_LIST_HEAD(&ctrl->ev_subs);
	ctrl->handler = hdl;
	ctrl->ops = ops;
	ctrl->id = id;
	ctrl->name = name;
	ctrl->type = type;
	ctrl->flags = flags;
	ctrl->minimum = min;
	ctrl->maximum = max;
	ctrl->step = step;
	if (type == V4L2_CTRL_TYPE_MENU)
		ctrl->qmenu = qmenu;
	else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
		ctrl->qmenu_int = qmenu_int;
	ctrl->priv = priv;
	ctrl->cur.val = ctrl->val = ctrl->default_value = def;

	if (ctrl->type == V4L2_CTRL_TYPE_STRING) {
		/* Both string buffers live at the end of the allocation */
		ctrl->cur.string = (char *)&ctrl[1] + sz_extra - (max + 1);
		ctrl->string = (char *)&ctrl[1] + sz_extra - 2 * (max + 1);
		/* Pad the initial string to the minimum length with spaces */
		if (ctrl->minimum)
			memset(ctrl->cur.string, ' ', ctrl->minimum);
	}
	if (handler_new_ref(hdl, ctrl)) {
		kfree(ctrl);
		return NULL;
	}
	mutex_lock(hdl->lock);
	list_add_tail(&ctrl->node, &hdl->ctrls);
	mutex_unlock(hdl->lock);
	return ctrl;
}
1549
/* Add a control described by a driver-specific v4l2_ctrl_config.
 * If no name is given the control is assumed to be a standard control and
 * its name/type/range are filled in via v4l2_ctrl_fill(). On failure NULL
 * is returned and hdl->error is set. */
struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
			const struct v4l2_ctrl_config *cfg, void *priv)
{
	bool is_menu;
	struct v4l2_ctrl *ctrl;
	const char *name = cfg->name;
	const char * const *qmenu = cfg->qmenu;
	const s64 *qmenu_int = cfg->qmenu_int;
	enum v4l2_ctrl_type type = cfg->type;
	u32 flags = cfg->flags;
	s32 min = cfg->min;
	s32 max = cfg->max;
	u32 step = cfg->step;
	s32 def = cfg->def;

	if (name == NULL)
		v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
			       &def, &flags);

	is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
		   cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
	/* Menu controls use menu_skip_mask instead of step and vice versa */
	if (is_menu)
		WARN_ON(step);
	else
		WARN_ON(cfg->menu_skip_mask);
	if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
		qmenu = v4l2_ctrl_get_menu(cfg->id);
	else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
		 qmenu_int == NULL) {
		handler_set_err(hdl, -EINVAL);
		return NULL;
	}

	ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->id, name,
			type, min, max,
			is_menu ? cfg->menu_skip_mask : step,
			def, flags, qmenu, qmenu_int, priv);
	if (ctrl)
		ctrl->is_private = cfg->is_private;
	return ctrl;
}
EXPORT_SYMBOL(v4l2_ctrl_new_custom);
1592
1593/* Helper function for standard non-menu controls */
1594struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
1595 const struct v4l2_ctrl_ops *ops,
1596 u32 id, s32 min, s32 max, u32 step, s32 def)
1597{
1598 const char *name;
1599 enum v4l2_ctrl_type type;
1600 u32 flags;
1601
1602 v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
1603 if (type == V4L2_CTRL_TYPE_MENU
1604 || type == V4L2_CTRL_TYPE_INTEGER_MENU) {
1605 handler_set_err(hdl, -EINVAL);
1606 return NULL;
1607 }
1608 return v4l2_ctrl_new(hdl, ops, id, name, type,
1609 min, max, step, def, flags, NULL, NULL, NULL);
1610}
1611EXPORT_SYMBOL(v4l2_ctrl_new_std);
1612
1613/* Helper function for standard menu controls */
1614struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
1615 const struct v4l2_ctrl_ops *ops,
1616 u32 id, s32 max, s32 mask, s32 def)
1617{
1618 const char * const *qmenu = v4l2_ctrl_get_menu(id);
1619 const char *name;
1620 enum v4l2_ctrl_type type;
1621 s32 min;
1622 s32 step;
1623 u32 flags;
1624
1625 v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
1626 if (type != V4L2_CTRL_TYPE_MENU) {
1627 handler_set_err(hdl, -EINVAL);
1628 return NULL;
1629 }
1630 return v4l2_ctrl_new(hdl, ops, id, name, type,
1631 0, max, mask, def, flags, qmenu, NULL, NULL);
1632}
1633EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
1634
1635/* Helper function for standard integer menu controls */
1636struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
1637 const struct v4l2_ctrl_ops *ops,
1638 u32 id, s32 max, s32 def, const s64 *qmenu_int)
1639{
1640 const char *name;
1641 enum v4l2_ctrl_type type;
1642 s32 min;
1643 s32 step;
1644 u32 flags;
1645
1646 v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
1647 if (type != V4L2_CTRL_TYPE_INTEGER_MENU) {
1648 handler_set_err(hdl, -EINVAL);
1649 return NULL;
1650 }
1651 return v4l2_ctrl_new(hdl, ops, id, name, type,
1652 0, max, 0, def, flags, NULL, qmenu_int, NULL);
1653}
1654EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
1655
1656/* Add a control from another handler to this handler */
1657struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
1658 struct v4l2_ctrl *ctrl)
1659{
1660 if (hdl == NULL || hdl->error)
1661 return NULL;
1662 if (ctrl == NULL) {
1663 handler_set_err(hdl, -EINVAL);
1664 return NULL;
1665 }
1666 if (ctrl->handler == hdl)
1667 return ctrl;
1668 return handler_new_ref(hdl, ctrl) ? NULL : ctrl;
1669}
1670EXPORT_SYMBOL(v4l2_ctrl_add_ctrl);
1671
/* Add the controls from another handler to our own.
 * Handler-private controls and control class controls are not inherited.
 * Does nothing if either handler is NULL or if they are the same.
 * Returns 0 or the first error from handler_new_ref(). */
int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
			  struct v4l2_ctrl_handler *add)
{
	struct v4l2_ctrl_ref *ref;
	int ret = 0;

	/* Do nothing if either handler is NULL or if they are the same */
	if (!hdl || !add || hdl == add)
		return 0;
	if (hdl->error)
		return hdl->error;
	mutex_lock(add->lock);
	list_for_each_entry(ref, &add->ctrl_refs, node) {
		struct v4l2_ctrl *ctrl = ref->ctrl;

		/* Skip handler-private controls. */
		if (ctrl->is_private)
			continue;
		/* And control classes */
		if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
			continue;
		ret = handler_new_ref(hdl, ctrl);
		if (ret)
			break;
	}
	mutex_unlock(add->lock);
	return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_add_handler);
1702
1703/* Cluster controls */
1704void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
1705{
1706 bool has_volatiles = false;
1707 int i;
1708
1709 /* The first control is the master control and it must not be NULL */
1710 BUG_ON(ncontrols == 0 || controls[0] == NULL);
1711
1712 for (i = 0; i < ncontrols; i++) {
1713 if (controls[i]) {
1714 controls[i]->cluster = controls;
1715 controls[i]->ncontrols = ncontrols;
1716 if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE)
1717 has_volatiles = true;
1718 }
1719 }
1720 controls[0]->has_volatiles = has_volatiles;
1721}
1722EXPORT_SYMBOL(v4l2_ctrl_cluster);
1723
/* Cluster controls with an auto/manual master control (controls[0]).
 * When the cluster is not in manual mode the slave controls are marked
 * inactive (and volatile if set_volatile is true). */
void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
			    u8 manual_val, bool set_volatile)
{
	struct v4l2_ctrl *master = controls[0];
	u32 flag = 0;
	int i;

	v4l2_ctrl_cluster(ncontrols, controls);
	/* An auto cluster needs the auto control plus at least one slave */
	WARN_ON(ncontrols <= 1);
	WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
	WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl));
	master->is_auto = true;
	master->has_volatiles = set_volatile;
	master->manual_mode_value = manual_val;
	/* Changing the master changes the flags of the slave controls */
	master->flags |= V4L2_CTRL_FLAG_UPDATE;

	/* If currently in auto mode, the slaves start out inactive */
	if (!is_cur_manual(master))
		flag = V4L2_CTRL_FLAG_INACTIVE |
			(set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0);

	for (i = 1; i < ncontrols; i++)
		if (controls[i])
			controls[i]->flags |= flag;
}
EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
1749
/* Activate/deactivate a control.
 * Atomically sets or clears V4L2_CTRL_FLAG_INACTIVE and sends a CH_FLAGS
 * event to all subscribers if the flag actually changed.
 * NOTE(review): unlike v4l2_ctrl_grab() this does not take the control
 * lock — presumably so it can be called from within the control ops;
 * confirm with the framework documentation. */
void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
{
	/* invert since the actual flag is called 'inactive' */
	bool inactive = !active;
	bool old;

	if (ctrl == NULL)
		return;

	if (inactive)
		/* set V4L2_CTRL_FLAG_INACTIVE */
		old = test_and_set_bit(4, &ctrl->flags);
	else
		/* clear V4L2_CTRL_FLAG_INACTIVE */
		old = test_and_clear_bit(4, &ctrl->flags);
	if (old != inactive)
		send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
}
EXPORT_SYMBOL(v4l2_ctrl_activate);
1770
/* Grab/ungrab a control.
   Typically used when streaming starts and you want to grab controls,
   preventing the user from changing them.

   Just call this and the framework will block any attempts to change
   these controls.

   Atomically sets or clears V4L2_CTRL_FLAG_GRABBED under the control's
   lock and sends a CH_FLAGS event to all subscribers if it changed. */
void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
{
	bool old;

	if (ctrl == NULL)
		return;

	v4l2_ctrl_lock(ctrl);
	if (grabbed)
		/* set V4L2_CTRL_FLAG_GRABBED */
		old = test_and_set_bit(1, &ctrl->flags);
	else
		/* clear V4L2_CTRL_FLAG_GRABBED */
		old = test_and_clear_bit(1, &ctrl->flags);
	if (old != grabbed)
		send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
	v4l2_ctrl_unlock(ctrl);
}
EXPORT_SYMBOL(v4l2_ctrl_grab);
1796
1797/* Log the control name and value */
1798static void log_ctrl(const struct v4l2_ctrl *ctrl,
1799 const char *prefix, const char *colon)
1800{
1801 if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
1802 return;
1803 if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
1804 return;
1805
1806 printk(KERN_INFO "%s%s%s: ", prefix, colon, ctrl->name);
1807
1808 switch (ctrl->type) {
1809 case V4L2_CTRL_TYPE_INTEGER:
1810 printk(KERN_CONT "%d", ctrl->cur.val);
1811 break;
1812 case V4L2_CTRL_TYPE_BOOLEAN:
1813 printk(KERN_CONT "%s", ctrl->cur.val ? "true" : "false");
1814 break;
1815 case V4L2_CTRL_TYPE_MENU:
1816 printk(KERN_CONT "%s", ctrl->qmenu[ctrl->cur.val]);
1817 break;
1818 case V4L2_CTRL_TYPE_INTEGER_MENU:
1819 printk(KERN_CONT "%lld", ctrl->qmenu_int[ctrl->cur.val]);
1820 break;
1821 case V4L2_CTRL_TYPE_BITMASK:
1822 printk(KERN_CONT "0x%08x", ctrl->cur.val);
1823 break;
1824 case V4L2_CTRL_TYPE_INTEGER64:
1825 printk(KERN_CONT "%lld", ctrl->cur.val64);
1826 break;
1827 case V4L2_CTRL_TYPE_STRING:
1828 printk(KERN_CONT "%s", ctrl->cur.string);
1829 break;
1830 default:
1831 printk(KERN_CONT "unknown type %d", ctrl->type);
1832 break;
1833 }
1834 if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
1835 V4L2_CTRL_FLAG_GRABBED |
1836 V4L2_CTRL_FLAG_VOLATILE)) {
1837 if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
1838 printk(KERN_CONT " inactive");
1839 if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)
1840 printk(KERN_CONT " grabbed");
1841 if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE)
1842 printk(KERN_CONT " volatile");
1843 }
1844 printk(KERN_CONT "\n");
1845}
1846
1847/* Log all controls owned by the handler */
1848void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
1849 const char *prefix)
1850{
1851 struct v4l2_ctrl *ctrl;
1852 const char *colon = "";
1853 int len;
1854
1855 if (hdl == NULL)
1856 return;
1857 if (prefix == NULL)
1858 prefix = "";
1859 len = strlen(prefix);
1860 if (len && prefix[len - 1] != ' ')
1861 colon = ": ";
1862 mutex_lock(hdl->lock);
1863 list_for_each_entry(ctrl, &hdl->ctrls, node)
1864 if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
1865 log_ctrl(ctrl, prefix, colon);
1866 mutex_unlock(hdl->lock);
1867}
1868EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
1869
/* Call s_ctrl for all controls owned by the handler.
 * Used to program the hardware with the initial control values. Clusters
 * are set up as a whole via their master control; button and read-only
 * controls are skipped. Returns 0 or the first error from s_ctrl. */
int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
{
	struct v4l2_ctrl *ctrl;
	int ret = 0;

	if (hdl == NULL)
		return 0;
	mutex_lock(hdl->lock);
	/* First mark all controls as not yet handled */
	list_for_each_entry(ctrl, &hdl->ctrls, node)
		ctrl->done = false;

	list_for_each_entry(ctrl, &hdl->ctrls, node) {
		struct v4l2_ctrl *master = ctrl->cluster[0];
		int i;

		/* Skip if this control was already handled by a cluster. */
		/* Skip button controls and read-only controls. */
		if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
		    (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
			continue;

		/* Prepare every cluster member: new value := current value */
		for (i = 0; i < master->ncontrols; i++) {
			if (master->cluster[i]) {
				cur_to_new(master->cluster[i]);
				master->cluster[i]->is_new = 1;
				master->cluster[i]->done = true;
			}
		}
		ret = call_op(master, s_ctrl);
		if (ret)
			break;
	}
	mutex_unlock(hdl->lock);
	return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
1907
/* Implement VIDIOC_QUERYCTRL.
 * Looks up the requested control, supporting V4L2_CTRL_FLAG_NEXT_CTRL to
 * enumerate controls in ascending ID order, and fills in the query
 * structure. Returns -EINVAL if no (next) control exists. */
int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
{
	u32 id = qc->id & V4L2_CTRL_ID_MASK;
	struct v4l2_ctrl_ref *ref;
	struct v4l2_ctrl *ctrl;

	if (hdl == NULL)
		return -EINVAL;

	mutex_lock(hdl->lock);

	/* Try to find it */
	ref = find_ref(hdl, id);

	if ((qc->id & V4L2_CTRL_FLAG_NEXT_CTRL) && !list_empty(&hdl->ctrl_refs)) {
		/* Find the next control with ID > qc->id */

		/* Did we reach the end of the control list? */
		if (id >= node2id(hdl->ctrl_refs.prev)) {
			ref = NULL; /* Yes, so there is no next control */
		} else if (ref) {
			/* We found a control with the given ID, so just get
			   the next one in the list. */
			ref = list_entry(ref->node.next, typeof(*ref), node);
		} else {
			/* No control with the given ID exists, so start
			   searching for the next largest ID. We know there
			   is one, otherwise the first 'if' above would have
			   been true. */
			list_for_each_entry(ref, &hdl->ctrl_refs, node)
				if (id < ref->ctrl->id)
					break;
		}
	}
	mutex_unlock(hdl->lock);
	if (!ref)
		return -EINVAL;

	ctrl = ref->ctrl;
	memset(qc, 0, sizeof(*qc));
	/* Old-style private control IDs are reported unchanged */
	if (id >= V4L2_CID_PRIVATE_BASE)
		qc->id = id;
	else
		qc->id = ctrl->id;
	strlcpy(qc->name, ctrl->name, sizeof(qc->name));
	qc->minimum = ctrl->minimum;
	qc->maximum = ctrl->maximum;
	qc->default_value = ctrl->default_value;
	/* Menu controls always report step 1 */
	if (ctrl->type == V4L2_CTRL_TYPE_MENU
	    || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
		qc->step = 1;
	else
		qc->step = ctrl->step;
	qc->flags = ctrl->flags;
	qc->type = ctrl->type;
	return 0;
}
EXPORT_SYMBOL(v4l2_queryctrl);
1967
1968int v4l2_subdev_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
1969{
1970 if (qc->id & V4L2_CTRL_FLAG_NEXT_CTRL)
1971 return -EINVAL;
1972 return v4l2_queryctrl(sd->ctrl_handler, qc);
1973}
1974EXPORT_SYMBOL(v4l2_subdev_queryctrl);
1975
1976/* Implement VIDIOC_QUERYMENU */
1977int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
1978{
1979 struct v4l2_ctrl *ctrl;
1980 u32 i = qm->index;
1981
1982 ctrl = v4l2_ctrl_find(hdl, qm->id);
1983 if (!ctrl)
1984 return -EINVAL;
1985
1986 qm->reserved = 0;
1987 /* Sanity checks */
1988 switch (ctrl->type) {
1989 case V4L2_CTRL_TYPE_MENU:
1990 if (ctrl->qmenu == NULL)
1991 return -EINVAL;
1992 break;
1993 case V4L2_CTRL_TYPE_INTEGER_MENU:
1994 if (ctrl->qmenu_int == NULL)
1995 return -EINVAL;
1996 break;
1997 default:
1998 return -EINVAL;
1999 }
2000
2001 if (i < ctrl->minimum || i > ctrl->maximum)
2002 return -EINVAL;
2003
2004 /* Use mask to see if this menu item should be skipped */
2005 if (ctrl->menu_skip_mask & (1 << i))
2006 return -EINVAL;
2007 /* Empty menu items should also be skipped */
2008 if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
2009 if (ctrl->qmenu[i] == NULL || ctrl->qmenu[i][0] == '\0')
2010 return -EINVAL;
2011 strlcpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
2012 } else {
2013 qm->value = ctrl->qmenu_int[i];
2014 }
2015 return 0;
2016}
2017EXPORT_SYMBOL(v4l2_querymenu);
2018
/* Query a menu item of a sub-device control (VIDIOC_QUERYMENU). */
int v4l2_subdev_querymenu(struct v4l2_subdev *sd, struct v4l2_querymenu *qm)
{
	return v4l2_querymenu(sd->ctrl_handler, qm);
}
EXPORT_SYMBOL(v4l2_subdev_querymenu);
2024
2025
2026
2027/* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
2028
2029 It is not a fully atomic operation, just best-effort only. After all, if
2030 multiple controls have to be set through multiple i2c writes (for example)
2031 then some initial writes may succeed while others fail. Thus leaving the
2032 system in an inconsistent state. The question is how much effort you are
2033 willing to spend on trying to make something atomic that really isn't.
2034
2035 From the point of view of an application the main requirement is that
2036 when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an
2037 error should be returned without actually affecting any controls.
2038
2039 If all the values are correct, then it is acceptable to just give up
2040 in case of low-level errors.
2041
2042 It is important though that the application can tell when only a partial
2043 configuration was done. The way we do that is through the error_idx field
2044 of struct v4l2_ext_controls: if that is equal to the count field then no
2045 controls were affected. Otherwise all controls before that index were
2046 successful in performing their 'get' or 'set' operation, the control at
2047 the given index failed, and you don't know what happened with the controls
2048 after the failed one. Since if they were part of a control cluster they
2049 could have been successfully processed (if a cluster member was encountered
2050 at index < error_idx), they could have failed (if a cluster member was at
2051 error_idx), or they may not have been processed yet (if the first cluster
2052 member appeared after error_idx).
2053
2054 It is all fairly theoretical, though. In practice all you can do is to
2055 bail out. If error_idx == count, then it is an application bug. If
2056 error_idx < count then it is only an application bug if the error code was
2057 EBUSY. That usually means that something started streaming just when you
2058 tried to set the controls. In all other cases it is a driver/hardware
2059 problem and all you can do is to retry or bail out.
2060
2061 Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that
2062 never modifies controls the error_idx is just set to whatever control
2063 has an invalid value.
2064 */
2065
/* Prepare for the extended g/s/try functions.
   Find the controls in the control array and do some basic checks.

   For every requested control one v4l2_ctrl_helper is filled in: the
   control itself plus a reference to its cluster master.  A second pass
   then links helpers that share a cluster into a chain through the
   'next' index, so callers can process whole clusters at once.
   On failure cs->error_idx identifies the offending control. */
static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
			     struct v4l2_ext_controls *cs,
			     struct v4l2_ctrl_helper *helpers)
{
	struct v4l2_ctrl_helper *h;
	bool have_clusters = false;
	u32 i;

	for (i = 0, h = helpers; i < cs->count; i++, h++) {
		struct v4l2_ext_control *c = &cs->controls[i];
		struct v4l2_ctrl_ref *ref;
		struct v4l2_ctrl *ctrl;
		u32 id = c->id & V4L2_CTRL_ID_MASK;

		cs->error_idx = i;

		/* A non-zero ctrl_class restricts the request to
		   controls of that class. */
		if (cs->ctrl_class && V4L2_CTRL_ID2CLASS(id) != cs->ctrl_class)
			return -EINVAL;

		/* Old-style private controls are not allowed for
		   extended controls */
		if (id >= V4L2_CID_PRIVATE_BASE)
			return -EINVAL;
		ref = find_ref_lock(hdl, id);
		if (ref == NULL)
			return -EINVAL;
		ctrl = ref->ctrl;
		if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
			return -EINVAL;

		if (ctrl->cluster[0]->ncontrols > 1)
			have_clusters = true;
		if (ctrl->cluster[0] != ctrl)
			ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
		/* Store the ref to the master control of the cluster */
		h->mref = ref;
		h->ctrl = ctrl;
		/* Initially set next to 0, meaning that there is no other
		   control in this helper array belonging to the same
		   cluster */
		h->next = 0;
	}

	/* We are done if there were no controls that belong to a multi-
	   control cluster. */
	if (!have_clusters)
		return 0;

	/* The code below figures out in O(n) time which controls in the list
	   belong to the same cluster. */

	/* This has to be done with the handler lock taken. */
	mutex_lock(hdl->lock);

	/* First zero the helper field in the master control references */
	for (i = 0; i < cs->count; i++)
		helpers[i].mref->helper = NULL;
	for (i = 0, h = helpers; i < cs->count; i++, h++) {
		struct v4l2_ctrl_ref *mref = h->mref;

		/* If the mref->helper is set, then it points to an earlier
		   helper that belongs to the same cluster. */
		if (mref->helper) {
			/* Set the next field of mref->helper to the current
			   index: this means that that earlier helper now
			   points to the next helper in the same cluster. */
			mref->helper->next = i;
			/* mref should be set only for the first helper in the
			   cluster, clear the others. */
			h->mref = NULL;
		}
		/* Point the mref helper to the current helper struct. */
		mref->helper = h;
	}
	mutex_unlock(hdl->lock);
	return 0;
}
2145
2146/* Handles the corner case where cs->count == 0. It checks whether the
2147 specified control class exists. If that class ID is 0, then it checks
2148 whether there are any controls at all. */
2149static int class_check(struct v4l2_ctrl_handler *hdl, u32 ctrl_class)
2150{
2151 if (ctrl_class == 0)
2152 return list_empty(&hdl->ctrl_refs) ? -EINVAL : 0;
2153 return find_ref_lock(hdl, ctrl_class | 1) ? 0 : -EINVAL;
2154}
2155
2156
2157
/* Get extended controls. Allocates the helpers array if needed.

   Implements VIDIOC_G_EXT_CTRLS on top of a control handler: the
   requested controls are resolved via prepare_ext_ctrls(), then read
   cluster by cluster under the cluster master's lock.  Volatile
   controls (and volatile autoclusters currently in auto mode) are
   refreshed through g_volatile_ctrl before being copied out. */
int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
{
	/* Small on-stack array avoids a heap allocation for the common
	   case of only a few controls per request. */
	struct v4l2_ctrl_helper helper[4];
	struct v4l2_ctrl_helper *helpers = helper;
	int ret;
	int i, j;

	cs->error_idx = cs->count;
	cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);

	if (hdl == NULL)
		return -EINVAL;

	/* A zero count only asks whether the class (or any control)
	   exists; see class_check(). */
	if (cs->count == 0)
		return class_check(hdl, cs->ctrl_class);

	if (cs->count > ARRAY_SIZE(helper)) {
		helpers = kmalloc_array(cs->count, sizeof(helper[0]),
					GFP_KERNEL);
		if (helpers == NULL)
			return -ENOMEM;
	}

	ret = prepare_ext_ctrls(hdl, cs, helpers);
	cs->error_idx = cs->count;

	/* Write-only controls may not be read back. */
	for (i = 0; !ret && i < cs->count; i++)
		if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
			ret = -EACCES;

	for (i = 0; !ret && i < cs->count; i++) {
		int (*ctrl_to_user)(struct v4l2_ext_control *c,
				    struct v4l2_ctrl *ctrl) = cur_to_user;
		struct v4l2_ctrl *master;

		/* A NULL mref means this control was already handled as
		   part of an earlier helper's cluster chain. */
		if (helpers[i].mref == NULL)
			continue;

		master = helpers[i].mref->ctrl;
		cs->error_idx = i;

		v4l2_ctrl_lock(master);

		/* g_volatile_ctrl will update the new control values */
		if ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
		    (master->has_volatiles && !is_cur_manual(master))) {
			for (j = 0; j < master->ncontrols; j++)
				cur_to_new(master->cluster[j]);
			ret = call_op(master, g_volatile_ctrl);
			ctrl_to_user = new_to_user;
		}
		/* If OK, then copy the current (for non-volatile controls)
		   or the new (for volatile controls) control values to the
		   caller */
		if (!ret) {
			u32 idx = i;

			/* Walk the cluster chain built by
			   prepare_ext_ctrls(). */
			do {
				ret = ctrl_to_user(cs->controls + idx,
						   helpers[idx].ctrl);
				idx = helpers[idx].next;
			} while (!ret && idx);
		}
		v4l2_ctrl_unlock(master);
	}

	if (cs->count > ARRAY_SIZE(helper))
		kfree(helpers);
	return ret;
}
EXPORT_SYMBOL(v4l2_g_ext_ctrls);
2230
/* VIDIOC_G_EXT_CTRLS for sub-devices. */
int v4l2_subdev_g_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
{
	return v4l2_g_ext_ctrls(sd->ctrl_handler, cs);
}
EXPORT_SYMBOL(v4l2_subdev_g_ext_ctrls);
2236
/* Helper function to get a single control.

   Takes the cluster master's lock.  For a volatile control the whole
   cluster is refreshed through g_volatile_ctrl before *val is read;
   otherwise the cached current value is returned.

   NOTE(review): unlike v4l2_g_ext_ctrls() this only honours the
   control's own VOLATILE flag, not a volatile autocluster in auto
   mode (master->has_volatiles && !is_cur_manual()) — confirm this
   asymmetry is intended. */
static int get_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
{
	struct v4l2_ctrl *master = ctrl->cluster[0];
	int ret = 0;
	int i;

	/* Write-only controls cannot be read. */
	if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
		return -EACCES;

	v4l2_ctrl_lock(master);
	/* g_volatile_ctrl will update the current control values */
	if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
		for (i = 0; i < master->ncontrols; i++)
			cur_to_new(master->cluster[i]);
		ret = call_op(master, g_volatile_ctrl);
		*val = ctrl->val;
	} else {
		*val = ctrl->cur.val;
	}
	v4l2_ctrl_unlock(master);
	return ret;
}
2260
2261int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
2262{
2263 struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
2264
2265 if (ctrl == NULL || !type_is_int(ctrl))
2266 return -EINVAL;
2267 return get_ctrl(ctrl, &control->value);
2268}
2269EXPORT_SYMBOL(v4l2_g_ctrl);
2270
/* VIDIOC_G_CTRL for sub-devices. */
int v4l2_subdev_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
{
	return v4l2_g_ctrl(sd->ctrl_handler, control);
}
EXPORT_SYMBOL(v4l2_subdev_g_ctrl);
2276
2277s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
2278{
2279 s32 val = 0;
2280
2281 /* It's a driver bug if this happens. */
2282 WARN_ON(!type_is_int(ctrl));
2283 get_ctrl(ctrl, &val);
2284 return val;
2285}
2286EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
2287
2288
/* Core function that calls try/s_ctrl and ensures that the new value is
   copied to the current value on a set.
   Must be called with ctrl->handler->lock held.

   Returns 0 on success, -EBUSY if a to-be-set control in the cluster
   is grabbed, or the error from the driver's try_ctrl/s_ctrl op. */
static int try_or_set_cluster(struct v4l2_fh *fh,
			      struct v4l2_ctrl *master, bool set)
{
	bool update_flag;
	int ret;
	int i;

	/* Go through the cluster and either validate the new value or
	   (if no new value was set), copy the current value to the new
	   value, ensuring a consistent view for the control ops when
	   called. */
	for (i = 0; i < master->ncontrols; i++) {
		struct v4l2_ctrl *ctrl = master->cluster[i];

		if (ctrl == NULL)
			continue;

		if (!ctrl->is_new) {
			cur_to_new(ctrl);
			continue;
		}
		/* Check again: it may have changed since the
		   previous check in try_or_set_ext_ctrls(). */
		if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
			return -EBUSY;
	}

	ret = call_op(master, try_ctrl);

	/* Don't set if there is no change */
	if (ret || !set || !cluster_changed(master))
		return ret;
	ret = call_op(master, s_ctrl);
	if (ret)
		return ret;

	/* If OK, then make the new values permanent. */
	/* NOTE(review): the third argument to new_to_cur() presumably
	   signals an auto/manual mode flip to the slave controls
	   (i > 0) — confirm against new_to_cur()'s definition. */
	update_flag = is_cur_manual(master) != is_new_manual(master);
	for (i = 0; i < master->ncontrols; i++)
		new_to_cur(fh, master->cluster[i], update_flag && i > 0);
	return 0;
}
2334
2335/* Validate controls. */
2336static int validate_ctrls(struct v4l2_ext_controls *cs,
2337 struct v4l2_ctrl_helper *helpers, bool set)
2338{
2339 unsigned i;
2340 int ret = 0;
2341
2342 cs->error_idx = cs->count;
2343 for (i = 0; i < cs->count; i++) {
2344 struct v4l2_ctrl *ctrl = helpers[i].ctrl;
2345
2346 cs->error_idx = i;
2347
2348 if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
2349 return -EACCES;
2350 /* This test is also done in try_set_control_cluster() which
2351 is called in atomic context, so that has the final say,
2352 but it makes sense to do an up-front check as well. Once
2353 an error occurs in try_set_control_cluster() some other
2354 controls may have been set already and we want to do a
2355 best-effort to avoid that. */
2356 if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
2357 return -EBUSY;
2358 ret = validate_new(ctrl, &cs->controls[i]);
2359 if (ret)
2360 return ret;
2361 }
2362 return 0;
2363}
2364
2365/* Obtain the current volatile values of an autocluster and mark them
2366 as new. */
2367static void update_from_auto_cluster(struct v4l2_ctrl *master)
2368{
2369 int i;
2370
2371 for (i = 0; i < master->ncontrols; i++)
2372 cur_to_new(master->cluster[i]);
2373 if (!call_op(master, g_volatile_ctrl))
2374 for (i = 1; i < master->ncontrols; i++)
2375 if (master->cluster[i])
2376 master->cluster[i]->is_new = 1;
2377}
2378
/* Try or try-and-set controls

   Shared implementation of VIDIOC_TRY_EXT_CTRLS (set == false) and
   VIDIOC_S_EXT_CTRLS (set == true).  All controls are validated up
   front, then processed per cluster under the cluster master's lock.
   cs->error_idx reporting follows the atomicity notes above. */
static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
			     struct v4l2_ext_controls *cs,
			     bool set)
{
	/* Small on-stack array avoids a heap allocation for the common
	   case of only a few controls per request. */
	struct v4l2_ctrl_helper helper[4];
	struct v4l2_ctrl_helper *helpers = helper;
	unsigned i, j;
	int ret;

	cs->error_idx = cs->count;
	cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);

	if (hdl == NULL)
		return -EINVAL;

	/* A zero count only asks whether the class exists. */
	if (cs->count == 0)
		return class_check(hdl, cs->ctrl_class);

	if (cs->count > ARRAY_SIZE(helper)) {
		helpers = kmalloc_array(cs->count, sizeof(helper[0]),
					GFP_KERNEL);
		if (!helpers)
			return -ENOMEM;
	}
	ret = prepare_ext_ctrls(hdl, cs, helpers);
	if (!ret)
		ret = validate_ctrls(cs, helpers, set);
	/* Validation failures never touched any control, which for a
	   set is reported as error_idx == count. */
	if (ret && set)
		cs->error_idx = cs->count;
	for (i = 0; !ret && i < cs->count; i++) {
		struct v4l2_ctrl *master;
		u32 idx = i;

		/* A NULL mref means this control was already handled as
		   part of an earlier helper's cluster chain. */
		if (helpers[i].mref == NULL)
			continue;

		cs->error_idx = i;
		master = helpers[i].mref->ctrl;
		v4l2_ctrl_lock(master);

		/* Reset the 'is_new' flags of the cluster */
		for (j = 0; j < master->ncontrols; j++)
			if (master->cluster[j])
				master->cluster[j]->is_new = 0;

		/* For volatile autoclusters that are currently in auto mode
		   we need to discover if it will be set to manual mode.
		   If so, then we have to copy the current volatile values
		   first since those will become the new manual values (which
		   may be overwritten by explicit new values from this set
		   of controls). */
		if (master->is_auto && master->has_volatiles &&
		    !is_cur_manual(master)) {
			/* Pick an initial non-manual value */
			s32 new_auto_val = master->manual_mode_value + 1;
			u32 tmp_idx = idx;

			do {
				/* Check if the auto control is part of the
				   list, and remember the new value. */
				if (helpers[tmp_idx].ctrl == master)
					new_auto_val = cs->controls[tmp_idx].value;
				tmp_idx = helpers[tmp_idx].next;
			} while (tmp_idx);
			/* If the new value == the manual value, then copy
			   the current volatile values. */
			if (new_auto_val == master->manual_mode_value)
				update_from_auto_cluster(master);
		}

		/* Copy the new caller-supplied control values.
		   user_to_new() sets 'is_new' to 1. */
		do {
			ret = user_to_new(cs->controls + idx, helpers[idx].ctrl);
			idx = helpers[idx].next;
		} while (!ret && idx);

		if (!ret)
			ret = try_or_set_cluster(fh, master, set);

		/* Copy the new values back to userspace. */
		if (!ret) {
			idx = i;
			do {
				ret = new_to_user(cs->controls + idx,
						helpers[idx].ctrl);
				idx = helpers[idx].next;
			} while (!ret && idx);
		}
		v4l2_ctrl_unlock(master);
	}

	if (cs->count > ARRAY_SIZE(helper))
		kfree(helpers);
	return ret;
}
2476
/* VIDIOC_TRY_EXT_CTRLS: validate only, never applies the values. */
int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
{
	return try_set_ext_ctrls(NULL, hdl, cs, false);
}
EXPORT_SYMBOL(v4l2_try_ext_ctrls);

/* VIDIOC_S_EXT_CTRLS: validate and apply; fh identifies the caller. */
int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
					struct v4l2_ext_controls *cs)
{
	return try_set_ext_ctrls(fh, hdl, cs, true);
}
EXPORT_SYMBOL(v4l2_s_ext_ctrls);

/* VIDIOC_TRY_EXT_CTRLS for sub-devices. */
int v4l2_subdev_try_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
{
	return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, false);
}
EXPORT_SYMBOL(v4l2_subdev_try_ext_ctrls);

/* VIDIOC_S_EXT_CTRLS for sub-devices; a NULL filehandle is passed. */
int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
{
	return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, true);
}
EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
2501
/* Helper function for VIDIOC_S_CTRL compatibility

   Validates and applies a single integer control value through the
   cluster machinery.  On return *val holds ctrl->cur.val, the value
   actually in effect after the operation. */
static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, s32 *val)
{
	struct v4l2_ctrl *master = ctrl->cluster[0];
	int ret;
	int i;

	ret = validate_new_int(ctrl, val);
	if (ret)
		return ret;

	/* NOTE(review): locking 'ctrl' is assumed to take the same
	   (per-handler) lock as locking 'master' would — confirm
	   against v4l2_ctrl_lock()'s definition. */
	v4l2_ctrl_lock(ctrl);

	/* Reset the 'is_new' flags of the cluster */
	for (i = 0; i < master->ncontrols; i++)
		if (master->cluster[i])
			master->cluster[i]->is_new = 0;

	/* For autoclusters with volatiles that are switched from auto to
	   manual mode we have to update the current volatile values since
	   those will become the initial manual values after such a switch. */
	if (master->is_auto && master->has_volatiles && ctrl == master &&
	    !is_cur_manual(master) && *val == master->manual_mode_value)
		update_from_auto_cluster(master);
	ctrl->val = *val;
	ctrl->is_new = 1;
	ret = try_or_set_cluster(fh, master, true);
	*val = ctrl->cur.val;
	v4l2_ctrl_unlock(ctrl);
	return ret;
}
2533
2534int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
2535 struct v4l2_control *control)
2536{
2537 struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
2538
2539 if (ctrl == NULL || !type_is_int(ctrl))
2540 return -EINVAL;
2541
2542 if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
2543 return -EACCES;
2544
2545 return set_ctrl(fh, ctrl, &control->value);
2546}
2547EXPORT_SYMBOL(v4l2_s_ctrl);
2548
/* VIDIOC_S_CTRL for sub-devices; a NULL filehandle is passed. */
int v4l2_subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
{
	return v4l2_s_ctrl(NULL, sd->ctrl_handler, control);
}
EXPORT_SYMBOL(v4l2_subdev_s_ctrl);

/* In-kernel helper to set a single integer control. */
int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
{
	/* It's a driver bug if this happens. */
	WARN_ON(!type_is_int(ctrl));
	return set_ctrl(NULL, ctrl, &val);
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
2562
/* 'add' op for control event subscriptions: register the subscriber
   with the control and optionally queue an initial state event. */
static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
{
	struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);

	if (ctrl == NULL)
		return -EINVAL;

	v4l2_ctrl_lock(ctrl);
	list_add_tail(&sev->node, &ctrl->ev_subs);
	if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
	    (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) {
		struct v4l2_event ev;
		u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;

		/* A write-only control has no readable value to report. */
		if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
			changes |= V4L2_EVENT_CTRL_CH_VALUE;
		fill_event(&ev, ctrl, changes);
		/* Mark the queue as active, allowing this initial
		   event to be accepted. */
		sev->elems = elems;
		v4l2_event_queue_fh(sev->fh, &ev);
	}
	v4l2_ctrl_unlock(ctrl);
	return 0;
}
2588
/* 'del' op for control event subscriptions.
   NOTE(review): unlike the 'add' op there is no NULL check on the
   v4l2_ctrl_find() result — presumably safe because 'del' is only
   called for subscriptions whose 'add' succeeded; confirm. */
static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
{
	struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);

	v4l2_ctrl_lock(ctrl);
	list_del(&sev->node);
	v4l2_ctrl_unlock(ctrl);
}
2597
2598void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new)
2599{
2600 u32 old_changes = old->u.ctrl.changes;
2601
2602 old->u.ctrl = new->u.ctrl;
2603 old->u.ctrl.changes |= old_changes;
2604}
2605EXPORT_SYMBOL(v4l2_ctrl_replace);
2606
/* Event-queue 'merge' op: fold the change flags of the dropped event
   into the event that is kept. */
void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new)
{
	new->u.ctrl.changes |= old->u.ctrl.changes;
}
EXPORT_SYMBOL(v4l2_ctrl_merge);

/* Ops used when subscribing to V4L2_EVENT_CTRL events. */
const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = {
	.add = v4l2_ctrl_add_event,
	.del = v4l2_ctrl_del_event,
	.replace = v4l2_ctrl_replace,
	.merge = v4l2_ctrl_merge,
};
EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops);
2620
/* Helper drivers can plug into vidioc_log_status: dump all controls
   of the filehandle's handler to the kernel log. */
int v4l2_ctrl_log_status(struct file *file, void *fh)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_fh *vfh = file->private_data;

	/* Only filehandles based on struct v4l2_fh carry a handler. */
	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev)
		v4l2_ctrl_handler_log_status(vfh->ctrl_handler,
			vfd->v4l2_dev->name);
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_log_status);
2632
2633int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
2634 struct v4l2_event_subscription *sub)
2635{
2636 if (sub->type == V4L2_EVENT_CTRL)
2637 return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
2638 return -EINVAL;
2639}
2640EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
2641
2642unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
2643{
2644 struct v4l2_fh *fh = file->private_data;
2645
2646 if (v4l2_event_pending(fh))
2647 return POLLPRI;
2648 poll_wait(file, &fh->wait, wait);
2649 return 0;
2650}
2651EXPORT_SYMBOL(v4l2_ctrl_poll);
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
new file mode 100644
index 000000000000..71237f5f85f4
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -0,0 +1,1003 @@
1/*
2 * Video capture interface for Linux version 2
3 *
4 * A generic video device interface for the LINUX operating system
5 * using a set of device structures/vectors for low level operations.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
13 * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
14 *
15 * Fixes: 20000516 Claudio Matsuoka <claudio@conectiva.com>
16 * - Added procfs support
17 */
18
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/kernel.h>
22#include <linux/mm.h>
23#include <linux/string.h>
24#include <linux/errno.h>
25#include <linux/init.h>
26#include <linux/kmod.h>
27#include <linux/slab.h>
28#include <asm/uaccess.h>
29
30#include <media/v4l2-common.h>
31#include <media/v4l2-device.h>
32#include <media/v4l2-ioctl.h>
33
34#define VIDEO_NUM_DEVICES 256
35#define VIDEO_NAME "video4linux"
36
37/*
38 * sysfs stuff
39 */
40
41static ssize_t show_index(struct device *cd,
42 struct device_attribute *attr, char *buf)
43{
44 struct video_device *vdev = to_video_device(cd);
45
46 return sprintf(buf, "%i\n", vdev->index);
47}
48
49static ssize_t show_debug(struct device *cd,
50 struct device_attribute *attr, char *buf)
51{
52 struct video_device *vdev = to_video_device(cd);
53
54 return sprintf(buf, "%i\n", vdev->debug);
55}
56
57static ssize_t set_debug(struct device *cd, struct device_attribute *attr,
58 const char *buf, size_t len)
59{
60 struct video_device *vdev = to_video_device(cd);
61 int res = 0;
62 u16 value;
63
64 res = kstrtou16(buf, 0, &value);
65 if (res)
66 return res;
67
68 vdev->debug = value;
69 return len;
70}
71
/* sysfs 'name' attribute: print with an explicit maximum width since
   vdev->name is a fixed-size array. */
static ssize_t show_name(struct device *cd,
			 struct device_attribute *attr, char *buf)
{
	struct video_device *vdev = to_video_device(cd);

	return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name);
}
79
80static struct device_attribute video_device_attrs[] = {
81 __ATTR(name, S_IRUGO, show_name, NULL),
82 __ATTR(debug, 0644, show_debug, set_debug),
83 __ATTR(index, S_IRUGO, show_index, NULL),
84 __ATTR_NULL
85};
86
87/*
88 * Active devices
89 */
90static struct video_device *video_device[VIDEO_NUM_DEVICES];
91static DEFINE_MUTEX(videodev_lock);
92static DECLARE_BITMAP(devnode_nums[VFL_TYPE_MAX], VIDEO_NUM_DEVICES);
93
94/* Device node utility functions */
95
96/* Note: these utility functions all assume that vfl_type is in the range
97 [0, VFL_TYPE_MAX-1]. */
98
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
/* Return the bitmap corresponding to vfl_type. */
static inline unsigned long *devnode_bits(int vfl_type)
{
	/* Any types not assigned to fixed minor ranges must be mapped to
	   one single bitmap for the purposes of finding a free node number
	   since all those unassigned types use the same minor range.
	   They all share the last slot (VFL_TYPE_MAX - 1). */
	int idx = (vfl_type > VFL_TYPE_RADIO) ? VFL_TYPE_MAX - 1 : vfl_type;

	return devnode_nums[idx];
}
#else
/* Return the bitmap corresponding to vfl_type; without fixed minor
   ranges every type simply has its own bitmap. */
static inline unsigned long *devnode_bits(int vfl_type)
{
	return devnode_nums[vfl_type];
}
#endif
117
/* Mark device node number vdev->num as used */
static inline void devnode_set(struct video_device *vdev)
{
	set_bit(vdev->num, devnode_bits(vdev->vfl_type));
}

/* Mark device node number vdev->num as unused */
static inline void devnode_clear(struct video_device *vdev)
{
	clear_bit(vdev->num, devnode_bits(vdev->vfl_type));
}

/* Try to find a free device node number in the half-open range
   [from, to); returns 'to' when no free number exists. */
static inline int devnode_find(struct video_device *vdev, int from, int to)
{
	return find_next_zero_bit(devnode_bits(vdev->vfl_type), to, from);
}
135
136struct video_device *video_device_alloc(void)
137{
138 return kzalloc(sizeof(struct video_device), GFP_KERNEL);
139}
140EXPORT_SYMBOL(video_device_alloc);
141
/* Release callback for devices allocated with video_device_alloc(). */
void video_device_release(struct video_device *vdev)
{
	kfree(vdev);
}
EXPORT_SYMBOL(video_device_release);

/* Release callback that frees nothing. */
void video_device_release_empty(struct video_device *vdev)
{
	/* Do nothing */
	/* Only valid when the video_device struct is a static. */
}
EXPORT_SYMBOL(video_device_release_empty);
154
/* Take a reference on the embedded struct device. */
static inline void video_get(struct video_device *vdev)
{
	get_device(&vdev->dev);
}

/* Drop a reference on the embedded struct device. */
static inline void video_put(struct video_device *vdev)
{
	put_device(&vdev->dev);
}
164
/* Called when the last user of the video device exits.
   Under videodev_lock: clears the minor -> device mapping, tears down
   the cdev and frees the device node number; then unregisters the
   media entity (if any) and finally hands off to the driver's
   release() callback. */
static void v4l2_device_release(struct device *cd)
{
	struct video_device *vdev = to_video_device(cd);
	struct v4l2_device *v4l2_dev = vdev->v4l2_dev;

	mutex_lock(&videodev_lock);
	if (WARN_ON(video_device[vdev->minor] != vdev)) {
		/* should not happen */
		mutex_unlock(&videodev_lock);
		return;
	}

	/* Free up this device for reuse */
	video_device[vdev->minor] = NULL;

	/* Delete the cdev on this minor as well */
	cdev_del(vdev->cdev);
	/* Just in case some driver tries to access this from
	   the release() callback. */
	vdev->cdev = NULL;

	/* Mark device node number as free */
	devnode_clear(vdev);

	mutex_unlock(&videodev_lock);

#if defined(CONFIG_MEDIA_CONTROLLER)
	if (v4l2_dev && v4l2_dev->mdev &&
	    vdev->vfl_type != VFL_TYPE_SUBDEV)
		media_device_unregister_entity(&vdev->entity);
#endif

	/* Do not call v4l2_device_put if there is no release callback set.
	 * Drivers that have no v4l2_device release callback might free the
	 * v4l2_dev instance in the video_device release callback below, so we
	 * must perform this check here.
	 *
	 * TODO: In the long run all drivers that use v4l2_device should use the
	 * v4l2_device release callback. This check will then be unnecessary.
	 */
	if (v4l2_dev && v4l2_dev->release == NULL)
		v4l2_dev = NULL;

	/* Release video_device and perform other
	   cleanups as needed. */
	vdev->release(vdev);

	/* Decrease v4l2_device refcount */
	if (v4l2_dev)
		v4l2_device_put(v4l2_dev);
}
217
/* The 'video4linux' device class, carrying the sysfs attributes
   defined in video_device_attrs above. */
static struct class video_class = {
	.name = VIDEO_NAME,
	.dev_attrs = video_device_attrs,
};
222
/* Look up the video_device behind an open file via its character
   device minor number. */
struct video_device *video_devdata(struct file *file)
{
	return video_device[iminor(file->f_path.dentry->d_inode)];
}
EXPORT_SYMBOL(video_devdata);
228
229
230/* Priority handling */
231
232static inline bool prio_is_valid(enum v4l2_priority prio)
233{
234 return prio == V4L2_PRIORITY_BACKGROUND ||
235 prio == V4L2_PRIORITY_INTERACTIVE ||
236 prio == V4L2_PRIORITY_RECORD;
237}
238
/* Reset all priority usage counters to zero. */
void v4l2_prio_init(struct v4l2_prio_state *global)
{
	memset(global, 0, sizeof(*global));
}
EXPORT_SYMBOL(v4l2_prio_init);
244
245int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
246 enum v4l2_priority new)
247{
248 if (!prio_is_valid(new))
249 return -EINVAL;
250 if (*local == new)
251 return 0;
252
253 atomic_inc(&global->prios[new]);
254 if (prio_is_valid(*local))
255 atomic_dec(&global->prios[*local]);
256 *local = new;
257 return 0;
258}
259EXPORT_SYMBOL(v4l2_prio_change);
260
/* Register a newly opened filehandle with the default priority. */
void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local)
{
	v4l2_prio_change(global, local, V4L2_PRIORITY_DEFAULT);
}
EXPORT_SYMBOL(v4l2_prio_open);

/* Drop a filehandle's priority on close; UNSET/invalid priorities
   were never counted and are ignored. */
void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local)
{
	if (prio_is_valid(local))
		atomic_dec(&global->prios[local]);
}
EXPORT_SYMBOL(v4l2_prio_close);
273
/* Return the highest priority currently in use, probing from the
   highest (RECORD) downwards; V4L2_PRIORITY_UNSET when none is. */
enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global)
{
	if (atomic_read(&global->prios[V4L2_PRIORITY_RECORD]) > 0)
		return V4L2_PRIORITY_RECORD;
	if (atomic_read(&global->prios[V4L2_PRIORITY_INTERACTIVE]) > 0)
		return V4L2_PRIORITY_INTERACTIVE;
	if (atomic_read(&global->prios[V4L2_PRIORITY_BACKGROUND]) > 0)
		return V4L2_PRIORITY_BACKGROUND;
	return V4L2_PRIORITY_UNSET;
}
EXPORT_SYMBOL(v4l2_prio_max);
285
286int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local)
287{
288 return (local < v4l2_prio_max(global)) ? -EBUSY : 0;
289}
290EXPORT_SYMBOL(v4l2_prio_check);
291
292
293static ssize_t v4l2_read(struct file *filp, char __user *buf,
294 size_t sz, loff_t *off)
295{
296 struct video_device *vdev = video_devdata(filp);
297 int ret = -ENODEV;
298
299 if (!vdev->fops->read)
300 return -EINVAL;
301 if (video_is_registered(vdev))
302 ret = vdev->fops->read(filp, buf, sz, off);
303 if (vdev->debug)
304 printk(KERN_DEBUG "%s: read: %zd (%d)\n",
305 video_device_node_name(vdev), sz, ret);
306 return ret;
307}
308
309static ssize_t v4l2_write(struct file *filp, const char __user *buf,
310 size_t sz, loff_t *off)
311{
312 struct video_device *vdev = video_devdata(filp);
313 int ret = -ENODEV;
314
315 if (!vdev->fops->write)
316 return -EINVAL;
317 if (video_is_registered(vdev))
318 ret = vdev->fops->write(filp, buf, sz, off);
319 if (vdev->debug)
320 printk(KERN_DEBUG "%s: write: %zd (%d)\n",
321 video_device_node_name(vdev), sz, ret);
322 return ret;
323}
324
/* fops->poll wrapper: devices without a poll op report the default
   mask; unregistered devices report POLLERR | POLLHUP. */
static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
	struct video_device *vdev = video_devdata(filp);
	unsigned int res = POLLERR | POLLHUP;

	if (!vdev->fops->poll)
		return DEFAULT_POLLMASK;
	if (video_is_registered(vdev))
		res = vdev->fops->poll(filp, poll);
	if (vdev->debug)
		printk(KERN_DEBUG "%s: poll: %08x\n",
			video_device_node_name(vdev), res);
	return res;
}
339
/* ioctl dispatcher for video devices.

   Prefers .unlocked_ioctl, serialized by whatever lock (possibly
   none) v4l2_ioctl_get_lock() selects for this command; otherwise
   falls back to the legacy .ioctl path with the coarse serialization
   described below.  Returns -ENODEV once the device has been
   unregistered and -ENOTTY when the driver offers neither op. */
static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct video_device *vdev = video_devdata(filp);
	int ret = -ENODEV;

	if (vdev->fops->unlocked_ioctl) {
		struct mutex *lock = v4l2_ioctl_get_lock(vdev, cmd);

		if (lock && mutex_lock_interruptible(lock))
			return -ERESTARTSYS;
		if (video_is_registered(vdev))
			ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
		if (lock)
			mutex_unlock(lock);
	} else if (vdev->fops->ioctl) {
		/* This code path is a replacement for the BKL. It is a major
		 * hack but it will have to do for those drivers that are not
		 * yet converted to use unlocked_ioctl.
		 *
		 * There are two options: if the driver implements struct
		 * v4l2_device, then the lock defined there is used to
		 * serialize the ioctls. Otherwise the v4l2 core lock defined
		 * below is used. This lock is really bad since it serializes
		 * completely independent devices.
		 *
		 * Both variants suffer from the same problem: if the driver
		 * sleeps, then it blocks all ioctls since the lock is still
		 * held. This is very common for VIDIOC_DQBUF since that
		 * normally waits for a frame to arrive. As a result any other
		 * ioctl calls will proceed very, very slowly since each call
		 * will have to wait for the VIDIOC_QBUF to finish. Things that
		 * should take 0.01s may now take 10-20 seconds.
		 *
		 * The workaround is to *not* take the lock for VIDIOC_DQBUF.
		 * This actually works OK for videobuf-based drivers, since
		 * videobuf will take its own internal lock.
		 */
		static DEFINE_MUTEX(v4l2_ioctl_mutex);
		struct mutex *m = vdev->v4l2_dev ?
			&vdev->v4l2_dev->ioctl_lock : &v4l2_ioctl_mutex;

		if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m))
			return -ERESTARTSYS;
		if (video_is_registered(vdev))
			ret = vdev->fops->ioctl(filp, cmd, arg);
		if (cmd != VIDIOC_DQBUF)
			mutex_unlock(m);
	} else
		ret = -ENOTTY;

	return ret;
}
392
#ifdef CONFIG_MMU
/* On MMU systems the core supplies the mapping area; no override needed. */
#define v4l2_get_unmapped_area NULL
#else
/* NOMMU only: forward get_unmapped_area to the driver, if it has one.
 * Returns a negative errno encoded in the unsigned long per kernel
 * convention (checked by callers via IS_ERR_VALUE). */
static unsigned long v4l2_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct video_device *vdev = video_devdata(filp);
	int ret;

	if (!vdev->fops->get_unmapped_area)
		return -ENOSYS;
	if (!video_is_registered(vdev))
		return -ENODEV;
	ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
	if (vdev->debug)
		printk(KERN_DEBUG "%s: get_unmapped_area (%d)\n",
			video_device_node_name(vdev), ret);
	return ret;
}
#endif
414
415static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
416{
417 struct video_device *vdev = video_devdata(filp);
418 int ret = -ENODEV;
419
420 if (!vdev->fops->mmap)
421 return -ENODEV;
422 if (video_is_registered(vdev))
423 ret = vdev->fops->mmap(filp, vm);
424 if (vdev->debug)
425 printk(KERN_DEBUG "%s: mmap (%d)\n",
426 video_device_node_name(vdev), ret);
427 return ret;
428}
429
430/* Override for the open function */
431static int v4l2_open(struct inode *inode, struct file *filp)
432{
433 struct video_device *vdev;
434 int ret = 0;
435
436 /* Check if the video device is available */
437 mutex_lock(&videodev_lock);
438 vdev = video_devdata(filp);
439 /* return ENODEV if the video device has already been removed. */
440 if (vdev == NULL || !video_is_registered(vdev)) {
441 mutex_unlock(&videodev_lock);
442 return -ENODEV;
443 }
444 /* and increase the device refcount */
445 video_get(vdev);
446 mutex_unlock(&videodev_lock);
447 if (vdev->fops->open) {
448 if (video_is_registered(vdev))
449 ret = vdev->fops->open(filp);
450 else
451 ret = -ENODEV;
452 }
453
454 if (vdev->debug)
455 printk(KERN_DEBUG "%s: open (%d)\n",
456 video_device_node_name(vdev), ret);
457 /* decrease the refcount in case of an error */
458 if (ret)
459 video_put(vdev);
460 return ret;
461}
462
463/* Override for the release function */
464static int v4l2_release(struct inode *inode, struct file *filp)
465{
466 struct video_device *vdev = video_devdata(filp);
467 int ret = 0;
468
469 if (vdev->fops->release)
470 ret = vdev->fops->release(filp);
471 if (vdev->debug)
472 printk(KERN_DEBUG "%s: release\n",
473 video_device_node_name(vdev));
474
475 /* decrease the refcount unconditionally since the release()
476 return value is ignored. */
477 video_put(vdev);
478 return ret;
479}
480
/* Character-device file operations shared by all V4L2 device nodes.
 * Each handler looks up the video_device behind the file and forwards
 * to the driver-supplied ops in vdev->fops, with the registration and
 * serialization checks implemented above. */
static const struct file_operations v4l2_fops = {
	.owner = THIS_MODULE,
	.read = v4l2_read,
	.write = v4l2_write,
	.open = v4l2_open,
	.get_unmapped_area = v4l2_get_unmapped_area,
	.mmap = v4l2_mmap,
	.unlocked_ioctl = v4l2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = v4l2_compat_ioctl32,
#endif
	.release = v4l2_release,
	.poll = v4l2_poll,
	.llseek = no_llseek,	/* V4L2 nodes are not seekable */
};
496
497/**
498 * get_index - assign stream index number based on parent device
499 * @vdev: video_device to assign index number to, vdev->parent should be assigned
500 *
501 * Note that when this is called the new device has not yet been registered
502 * in the video_device array, but it was able to obtain a minor number.
503 *
504 * This means that we can always obtain a free stream index number since
505 * the worst case scenario is that there are VIDEO_NUM_DEVICES - 1 slots in
506 * use of the video_device array.
507 *
508 * Returns a free index number.
509 */
510static int get_index(struct video_device *vdev)
511{
512 /* This can be static since this function is called with the global
513 videodev_lock held. */
514 static DECLARE_BITMAP(used, VIDEO_NUM_DEVICES);
515 int i;
516
517 /* Some drivers do not set the parent. In that case always return 0. */
518 if (vdev->parent == NULL)
519 return 0;
520
521 bitmap_zero(used, VIDEO_NUM_DEVICES);
522
523 for (i = 0; i < VIDEO_NUM_DEVICES; i++) {
524 if (video_device[i] != NULL &&
525 video_device[i]->parent == vdev->parent) {
526 set_bit(video_device[i]->index, used);
527 }
528 }
529
530 return find_first_zero_bit(used, VIDEO_NUM_DEVICES);
531}
532
/* Mark @cmd as implemented in the local valid_ioctls bitmap iff the driver
 * provides the @op callback. Wrapped in do { } while (0) so the expansion
 * is a single statement and cannot steal an `else` when used inside
 * if/else chains (call sites always terminate it with a semicolon). */
#define SET_VALID_IOCTL(ops, cmd, op)				\
	do {							\
		if ((ops)->op)					\
			set_bit(_IOC_NR(cmd), valid_ioctls);	\
	} while (0)
536
/* This determines which ioctls are actually implemented in the driver.
   It's a one-time thing which simplifies video_ioctl2 as it can just do
   a bit test.

   Note that drivers can override this by setting bits to 1 in
   vdev->valid_ioctls. If an ioctl is marked as 1 when this function is
   called, then that ioctl will actually be marked as unimplemented.

   It does that by first setting up the local valid_ioctls bitmap, and
   at the end do a:

   vdev->valid_ioctls = valid_ioctls & ~(vdev->valid_ioctls)

   Called once from __video_register_device() before the node goes live. */
static void determine_valid_ioctls(struct video_device *vdev)
{
	DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
	const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;

	bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE);

	SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap);
	/* G/S_PRIORITY are also valid when the core handles priorities
	   through v4l2_fh (V4L2_FL_USE_FH_PRIO). */
	if (ops->vidioc_g_priority ||
			test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
		set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
	if (ops->vidioc_s_priority ||
			test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
		set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
	/* The format ioctls are valid if any per-buffer-type variant is
	   implemented; video_ioctl2 routes to the right one at runtime. */
	if (ops->vidioc_enum_fmt_vid_cap ||
	    ops->vidioc_enum_fmt_vid_out ||
	    ops->vidioc_enum_fmt_vid_cap_mplane ||
	    ops->vidioc_enum_fmt_vid_out_mplane ||
	    ops->vidioc_enum_fmt_vid_overlay ||
	    ops->vidioc_enum_fmt_type_private)
		set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
	if (ops->vidioc_g_fmt_vid_cap ||
	    ops->vidioc_g_fmt_vid_out ||
	    ops->vidioc_g_fmt_vid_cap_mplane ||
	    ops->vidioc_g_fmt_vid_out_mplane ||
	    ops->vidioc_g_fmt_vid_overlay ||
	    ops->vidioc_g_fmt_vbi_cap ||
	    ops->vidioc_g_fmt_vid_out_overlay ||
	    ops->vidioc_g_fmt_vbi_out ||
	    ops->vidioc_g_fmt_sliced_vbi_cap ||
	    ops->vidioc_g_fmt_sliced_vbi_out ||
	    ops->vidioc_g_fmt_type_private)
		set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
	if (ops->vidioc_s_fmt_vid_cap ||
	    ops->vidioc_s_fmt_vid_out ||
	    ops->vidioc_s_fmt_vid_cap_mplane ||
	    ops->vidioc_s_fmt_vid_out_mplane ||
	    ops->vidioc_s_fmt_vid_overlay ||
	    ops->vidioc_s_fmt_vbi_cap ||
	    ops->vidioc_s_fmt_vid_out_overlay ||
	    ops->vidioc_s_fmt_vbi_out ||
	    ops->vidioc_s_fmt_sliced_vbi_cap ||
	    ops->vidioc_s_fmt_sliced_vbi_out ||
	    ops->vidioc_s_fmt_type_private)
		set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
	if (ops->vidioc_try_fmt_vid_cap ||
	    ops->vidioc_try_fmt_vid_out ||
	    ops->vidioc_try_fmt_vid_cap_mplane ||
	    ops->vidioc_try_fmt_vid_out_mplane ||
	    ops->vidioc_try_fmt_vid_overlay ||
	    ops->vidioc_try_fmt_vbi_cap ||
	    ops->vidioc_try_fmt_vid_out_overlay ||
	    ops->vidioc_try_fmt_vbi_out ||
	    ops->vidioc_try_fmt_sliced_vbi_cap ||
	    ops->vidioc_try_fmt_sliced_vbi_out ||
	    ops->vidioc_try_fmt_type_private)
		set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
	SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
	SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
	SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
	SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
	SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
	SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
	SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf);
	SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
	SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
	/* ENUMSTD only makes sense when the device advertises TV norms. */
	if (vdev->tvnorms)
		set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
	/* G_STD can also be answered from the cached current_norm. */
	if (ops->vidioc_g_std || vdev->current_norm)
		set_bit(_IOC_NR(VIDIOC_G_STD), valid_ioctls);
	SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
	SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd);
	SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
	SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
	SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
	SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
	SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
	SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
	/* Note: the control handler can also be passed through the filehandle,
	   and that can't be tested here. If the bit for these control ioctls
	   is set, then the ioctl is valid. But if it is 0, then it can still
	   be valid if the filehandle passed the control handler. */
	if (vdev->ctrl_handler || ops->vidioc_queryctrl)
		set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
	if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls)
		set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
	if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls)
		set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
	if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
		set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
	if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
		set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls);
	if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls)
		set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
	if (vdev->ctrl_handler || ops->vidioc_querymenu)
		set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
	SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio);
	SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);
	SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);
	SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout);
	SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
	SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
	SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
	SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
	/* The crop ioctls can be emulated through the selection API. */
	if (ops->vidioc_g_crop || ops->vidioc_g_selection)
		set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
	if (ops->vidioc_s_crop || ops->vidioc_s_selection)
		set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
	SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
	SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
	if (ops->vidioc_cropcap || ops->vidioc_g_selection)
		set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
	SET_VALID_IOCTL(ops, VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp);
	SET_VALID_IOCTL(ops, VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp);
	SET_VALID_IOCTL(ops, VIDIOC_G_ENC_INDEX, vidioc_g_enc_index);
	SET_VALID_IOCTL(ops, VIDIOC_ENCODER_CMD, vidioc_encoder_cmd);
	SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
	SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
	SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
	/* G_PARM can be emulated for video nodes from the standard info. */
	if (ops->vidioc_g_parm || (vdev->vfl_type == VFL_TYPE_GRABBER &&
					(ops->vidioc_g_std || vdev->tvnorms)))
		set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
	SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
	SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
	SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
	SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
	SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
	SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
	SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
#ifdef CONFIG_VIDEO_ADV_DEBUG
	SET_VALID_IOCTL(ops, VIDIOC_DBG_G_REGISTER, vidioc_g_register);
	SET_VALID_IOCTL(ops, VIDIOC_DBG_S_REGISTER, vidioc_s_register);
#endif
	SET_VALID_IOCTL(ops, VIDIOC_DBG_G_CHIP_IDENT, vidioc_g_chip_ident);
	SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek);
	SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
	SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
	SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_PRESETS, vidioc_enum_dv_presets);
	SET_VALID_IOCTL(ops, VIDIOC_S_DV_PRESET, vidioc_s_dv_preset);
	SET_VALID_IOCTL(ops, VIDIOC_G_DV_PRESET, vidioc_g_dv_preset);
	SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset);
	SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
	SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
	SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
	SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
	SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
	/* yes, really vidioc_subscribe_event */
	SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
	SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
	SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
	SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
	SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
	if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
		set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
	/* Apply the driver's override mask (see comment above). */
	bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls,
			BASE_VIDIOC_PRIVATE);
}
707
/**
 * __video_register_device - register video4linux devices
 * @vdev: video device structure we want to register
 * @type: type of device to register
 * @nr:   which device node number (0 == /dev/video0, 1 == /dev/video1, ...
 *	  -1 == first free)
 * @warn_if_nr_in_use: warn if the desired device node number
 *        was already in use and another number was chosen instead.
 * @owner: module that owns the video device node
 *
 * The registration code assigns minor numbers and device node numbers
 * based on the requested type and registers the new device node with
 * the kernel.
 *
 * This function assumes that struct video_device was zeroed when it
 * was allocated and does not contain any stale data.
 *
 * An error is returned if no free minor or device node number could be
 * found, or if the registration of the device node failed.
 *
 * Zero is returned on success.
 *
 * Valid types are
 *
 * %VFL_TYPE_GRABBER - A frame grabber
 *
 * %VFL_TYPE_VBI - Vertical blank data (undecoded)
 *
 * %VFL_TYPE_RADIO - A radio card
 *
 * %VFL_TYPE_SUBDEV - A subdevice
 */
int __video_register_device(struct video_device *vdev, int type, int nr,
		int warn_if_nr_in_use, struct module *owner)
{
	int i = 0;
	int ret;
	int minor_offset = 0;
	int minor_cnt = VIDEO_NUM_DEVICES;
	const char *name_base;

	/* A minor value of -1 marks this video device as never
	   having been registered */
	vdev->minor = -1;

	/* the release callback MUST be present */
	if (WARN_ON(!vdev->release))
		return -EINVAL;

	/* v4l2_fh support */
	spin_lock_init(&vdev->fh_lock);
	INIT_LIST_HEAD(&vdev->fh_list);

	/* Part 1: check device type */
	switch (type) {
	case VFL_TYPE_GRABBER:
		name_base = "video";
		break;
	case VFL_TYPE_VBI:
		name_base = "vbi";
		break;
	case VFL_TYPE_RADIO:
		name_base = "radio";
		break;
	case VFL_TYPE_SUBDEV:
		name_base = "v4l-subdev";
		break;
	default:
		printk(KERN_ERR "%s called with unknown type: %d\n",
		       __func__, type);
		return -EINVAL;
	}

	vdev->vfl_type = type;
	vdev->cdev = NULL;
	/* Inherit parent, control handler and priority state from the
	   v4l2_device when the driver did not set them explicitly. */
	if (vdev->v4l2_dev) {
		if (vdev->v4l2_dev->dev)
			vdev->parent = vdev->v4l2_dev->dev;
		if (vdev->ctrl_handler == NULL)
			vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler;
		/* If the prio state pointer is NULL, then use the v4l2_device
		   prio state. */
		if (vdev->prio == NULL)
			vdev->prio = &vdev->v4l2_dev->prio;
	}

	/* Part 2: find a free minor, device node number and device index. */
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
	/* Keep the ranges for the first four types for historical
	 * reasons.
	 * Newer devices (not yet in place) should use the range
	 * of 128-191 and just pick the first free minor there
	 * (new style). */
	switch (type) {
	case VFL_TYPE_GRABBER:
		minor_offset = 0;
		minor_cnt = 64;
		break;
	case VFL_TYPE_RADIO:
		minor_offset = 64;
		minor_cnt = 64;
		break;
	case VFL_TYPE_VBI:
		minor_offset = 224;
		minor_cnt = 32;
		break;
	default:
		minor_offset = 128;
		minor_cnt = 64;
		break;
	}
#endif

	/* Pick a device node number */
	mutex_lock(&videodev_lock);
	nr = devnode_find(vdev, nr == -1 ? 0 : nr, minor_cnt);
	/* The requested node number was taken: retry from 0. */
	if (nr == minor_cnt)
		nr = devnode_find(vdev, 0, minor_cnt);
	if (nr == minor_cnt) {
		printk(KERN_ERR "could not get a free device node number\n");
		mutex_unlock(&videodev_lock);
		return -ENFILE;
	}
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
	/* 1-on-1 mapping of device node number to minor number */
	i = nr;
#else
	/* The device node number and minor numbers are independent, so
	   we just find the first free minor number. */
	for (i = 0; i < VIDEO_NUM_DEVICES; i++)
		if (video_device[i] == NULL)
			break;
	if (i == VIDEO_NUM_DEVICES) {
		mutex_unlock(&videodev_lock);
		printk(KERN_ERR "could not get a free minor\n");
		return -ENFILE;
	}
#endif
	vdev->minor = i + minor_offset;
	vdev->num = nr;
	devnode_set(vdev);

	/* Should not happen since we thought this minor was free */
	WARN_ON(video_device[vdev->minor] != NULL);
	vdev->index = get_index(vdev);
	mutex_unlock(&videodev_lock);

	if (vdev->ioctl_ops)
		determine_valid_ioctls(vdev);

	/* Part 3: Initialize the character device */
	vdev->cdev = cdev_alloc();
	if (vdev->cdev == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}
	vdev->cdev->ops = &v4l2_fops;
	vdev->cdev->owner = owner;
	ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1);
	if (ret < 0) {
		printk(KERN_ERR "%s: cdev_add failed\n", __func__);
		/* cdev_add failed, so the cdev was never made live;
		   kfree instead of cdev_del. */
		kfree(vdev->cdev);
		vdev->cdev = NULL;
		goto cleanup;
	}

	/* Part 4: register the device with sysfs */
	vdev->dev.class = &video_class;
	vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
	if (vdev->parent)
		vdev->dev.parent = vdev->parent;
	dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
	ret = device_register(&vdev->dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: device_register failed\n", __func__);
		goto cleanup;
	}
	/* Register the release callback that will be called when the last
	   reference to the device goes away. */
	vdev->dev.release = v4l2_device_release;

	if (nr != -1 && nr != vdev->num && warn_if_nr_in_use)
		printk(KERN_WARNING "%s: requested %s%d, got %s\n", __func__,
				name_base, nr, video_device_node_name(vdev));

	/* Increase v4l2_device refcount */
	if (vdev->v4l2_dev)
		v4l2_device_get(vdev->v4l2_dev);

#if defined(CONFIG_MEDIA_CONTROLLER)
	/* Part 5: Register the entity. */
	if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
	    vdev->vfl_type != VFL_TYPE_SUBDEV) {
		vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
		vdev->entity.name = vdev->name;
		vdev->entity.info.v4l.major = VIDEO_MAJOR;
		vdev->entity.info.v4l.minor = vdev->minor;
		ret = media_device_register_entity(vdev->v4l2_dev->mdev,
			&vdev->entity);
		/* Entity registration failure is non-fatal: only warn. */
		if (ret < 0)
			printk(KERN_WARNING
			       "%s: media_device_register_entity failed\n",
			       __func__);
	}
#endif
	/* Part 6: Activate this minor. The char device can now be used. */
	set_bit(V4L2_FL_REGISTERED, &vdev->flags);
	mutex_lock(&videodev_lock);
	video_device[vdev->minor] = vdev;
	mutex_unlock(&videodev_lock);

	return 0;

cleanup:
	mutex_lock(&videodev_lock);
	if (vdev->cdev)
		cdev_del(vdev->cdev);
	devnode_clear(vdev);
	mutex_unlock(&videodev_lock);
	/* Mark this video device as never having been registered. */
	vdev->minor = -1;
	return ret;
}
EXPORT_SYMBOL(__video_register_device);
932
933/**
934 * video_unregister_device - unregister a video4linux device
935 * @vdev: the device to unregister
936 *
937 * This unregisters the passed device. Future open calls will
938 * be met with errors.
939 */
940void video_unregister_device(struct video_device *vdev)
941{
942 /* Check if vdev was ever registered at all */
943 if (!vdev || !video_is_registered(vdev))
944 return;
945
946 mutex_lock(&videodev_lock);
947 /* This must be in a critical section to prevent a race with v4l2_open.
948 * Once this bit has been cleared video_get may never be called again.
949 */
950 clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
951 mutex_unlock(&videodev_lock);
952 device_unregister(&vdev->dev);
953}
954EXPORT_SYMBOL(video_unregister_device);
955
956/*
957 * Initialise video for linux
958 */
959static int __init videodev_init(void)
960{
961 dev_t dev = MKDEV(VIDEO_MAJOR, 0);
962 int ret;
963
964 printk(KERN_INFO "Linux video capture interface: v2.00\n");
965 ret = register_chrdev_region(dev, VIDEO_NUM_DEVICES, VIDEO_NAME);
966 if (ret < 0) {
967 printk(KERN_WARNING "videodev: unable to get major %d\n",
968 VIDEO_MAJOR);
969 return ret;
970 }
971
972 ret = class_register(&video_class);
973 if (ret < 0) {
974 unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
975 printk(KERN_WARNING "video_dev: class_register failed\n");
976 return -EIO;
977 }
978
979 return 0;
980}
981
/* Module teardown: remove the sysfs class first, then release the
 * character device region reserved in videodev_init(). */
static void __exit videodev_exit(void)
{
	dev_t dev = MKDEV(VIDEO_MAJOR, 0);

	class_unregister(&video_class);
	unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
}
989
990subsys_initcall(videodev_init);
991module_exit(videodev_exit)
992
993MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@infradead.org>");
994MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2");
995MODULE_LICENSE("GPL");
996MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR);
997
998
999/*
1000 * Local variables:
1001 * c-basic-offset: 8
1002 * End:
1003 */
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
new file mode 100644
index 000000000000..1f203b85a637
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -0,0 +1,280 @@
1/*
2 V4L2 device support.
3
4 Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/types.h>
22#include <linux/ioctl.h>
23#include <linux/module.h>
24#include <linux/i2c.h>
25#include <linux/slab.h>
26#if defined(CONFIG_SPI)
27#include <linux/spi/spi.h>
28#endif
29#include <linux/videodev2.h>
30#include <media/v4l2-device.h>
31#include <media/v4l2-ctrls.h>
32
33int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
34{
35 if (v4l2_dev == NULL)
36 return -EINVAL;
37
38 INIT_LIST_HEAD(&v4l2_dev->subdevs);
39 spin_lock_init(&v4l2_dev->lock);
40 mutex_init(&v4l2_dev->ioctl_lock);
41 v4l2_prio_init(&v4l2_dev->prio);
42 kref_init(&v4l2_dev->ref);
43 get_device(dev);
44 v4l2_dev->dev = dev;
45 if (dev == NULL) {
46 /* If dev == NULL, then name must be filled in by the caller */
47 WARN_ON(!v4l2_dev->name[0]);
48 return 0;
49 }
50
51 /* Set name to driver name + device name if it is empty. */
52 if (!v4l2_dev->name[0])
53 snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s",
54 dev->driver->name, dev_name(dev));
55 if (!dev_get_drvdata(dev))
56 dev_set_drvdata(dev, v4l2_dev);
57 return 0;
58}
59EXPORT_SYMBOL_GPL(v4l2_device_register);
60
/* kref release callback: invoked when the last reference obtained via
 * v4l2_device_get() is dropped; delegates cleanup to the driver's
 * optional release hook. */
static void v4l2_device_release(struct kref *ref)
{
	struct v4l2_device *v4l2_dev =
		container_of(ref, struct v4l2_device, ref);

	if (v4l2_dev->release)
		v4l2_dev->release(v4l2_dev);
}
69
/* Drop one reference; runs v4l2_device_release() on the last put.
 * Returns non-zero if the object was released (kref_put semantics). */
int v4l2_device_put(struct v4l2_device *v4l2_dev)
{
	return kref_put(&v4l2_dev->ref, v4l2_device_release);
}
EXPORT_SYMBOL_GPL(v4l2_device_put);
75
76int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
77 atomic_t *instance)
78{
79 int num = atomic_inc_return(instance) - 1;
80 int len = strlen(basename);
81
82 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
83 snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
84 "%s-%d", basename, num);
85 else
86 snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
87 "%s%d", basename, num);
88 return num;
89}
90EXPORT_SYMBOL_GPL(v4l2_device_set_name);
91
/* Detach the v4l2_device from its parent struct device: clear the
 * parent's drvdata (only if it still points at us) and drop the
 * reference taken in v4l2_device_register(). Idempotent: subsequent
 * calls are no-ops because dev is set to NULL. */
void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
{
	if (v4l2_dev->dev == NULL)
		return;

	if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
		dev_set_drvdata(v4l2_dev->dev, NULL);
	put_device(v4l2_dev->dev);
	v4l2_dev->dev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
103
/* Unregister the v4l2_device: disconnect from the parent device and
 * unregister every subdev still on the list. I2C/SPI-backed subdevs
 * additionally get their client devices torn down here. */
void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
{
	struct v4l2_subdev *sd, *next;

	if (v4l2_dev == NULL)
		return;
	v4l2_device_disconnect(v4l2_dev);

	/* Unregister subdevs */
	/* _safe iteration: v4l2_device_unregister_subdev() removes the
	   entry from the list. */
	list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
		v4l2_device_unregister_subdev(sd);
#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
		if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
			struct i2c_client *client = v4l2_get_subdevdata(sd);

			/* We need to unregister the i2c client explicitly.
			   We cannot rely on i2c_del_adapter to always
			   unregister clients for us, since if the i2c bus
			   is a platform bus, then it is never deleted. */
			if (client)
				i2c_unregister_device(client);
			continue;
		}
#endif
#if defined(CONFIG_SPI)
		if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) {
			struct spi_device *spi = v4l2_get_subdevdata(sd);

			if (spi)
				spi_unregister_device(spi);
			continue;
		}
#endif
	}
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister);
140
141int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
142 struct v4l2_subdev *sd)
143{
144#if defined(CONFIG_MEDIA_CONTROLLER)
145 struct media_entity *entity = &sd->entity;
146#endif
147 int err;
148
149 /* Check for valid input */
150 if (v4l2_dev == NULL || sd == NULL || !sd->name[0])
151 return -EINVAL;
152
153 /* Warn if we apparently re-register a subdev */
154 WARN_ON(sd->v4l2_dev != NULL);
155
156 if (!try_module_get(sd->owner))
157 return -ENODEV;
158
159 sd->v4l2_dev = v4l2_dev;
160 if (sd->internal_ops && sd->internal_ops->registered) {
161 err = sd->internal_ops->registered(sd);
162 if (err) {
163 module_put(sd->owner);
164 return err;
165 }
166 }
167
168 /* This just returns 0 if either of the two args is NULL */
169 err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler);
170 if (err) {
171 if (sd->internal_ops && sd->internal_ops->unregistered)
172 sd->internal_ops->unregistered(sd);
173 module_put(sd->owner);
174 return err;
175 }
176
177#if defined(CONFIG_MEDIA_CONTROLLER)
178 /* Register the entity. */
179 if (v4l2_dev->mdev) {
180 err = media_device_register_entity(v4l2_dev->mdev, entity);
181 if (err < 0) {
182 if (sd->internal_ops && sd->internal_ops->unregistered)
183 sd->internal_ops->unregistered(sd);
184 module_put(sd->owner);
185 return err;
186 }
187 }
188#endif
189
190 spin_lock(&v4l2_dev->lock);
191 list_add_tail(&sd->list, &v4l2_dev->subdevs);
192 spin_unlock(&v4l2_dev->lock);
193
194 return 0;
195}
196EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
197
/* video_device release callback for subdev nodes allocated in
 * v4l2_device_register_subdev_nodes(): break the subdev's link to the
 * node, then free the kzalloc'd video_device. */
static void v4l2_device_release_subdev_node(struct video_device *vdev)
{
	struct v4l2_subdev *sd = video_get_drvdata(vdev);
	sd->devnode = NULL;
	kfree(vdev);
}
204
205int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
206{
207 struct video_device *vdev;
208 struct v4l2_subdev *sd;
209 int err;
210
211 /* Register a device node for every subdev marked with the
212 * V4L2_SUBDEV_FL_HAS_DEVNODE flag.
213 */
214 list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
215 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
216 continue;
217
218 vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
219 if (!vdev) {
220 err = -ENOMEM;
221 goto clean_up;
222 }
223
224 video_set_drvdata(vdev, sd);
225 strlcpy(vdev->name, sd->name, sizeof(vdev->name));
226 vdev->v4l2_dev = v4l2_dev;
227 vdev->fops = &v4l2_subdev_fops;
228 vdev->release = v4l2_device_release_subdev_node;
229 vdev->ctrl_handler = sd->ctrl_handler;
230 err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
231 sd->owner);
232 if (err < 0) {
233 kfree(vdev);
234 goto clean_up;
235 }
236#if defined(CONFIG_MEDIA_CONTROLLER)
237 sd->entity.info.v4l.major = VIDEO_MAJOR;
238 sd->entity.info.v4l.minor = vdev->minor;
239#endif
240 sd->devnode = vdev;
241 }
242 return 0;
243
244clean_up:
245 list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
246 if (!sd->devnode)
247 break;
248 video_unregister_device(sd->devnode);
249 }
250
251 return err;
252}
253EXPORT_SYMBOL_GPL(v4l2_device_register_subdev_nodes);
254
/* Undo v4l2_device_register_subdev(): remove @sd from the subdev list,
 * run its unregistered() hook, tear down its media entity and devnode,
 * and release the module reference taken at registration. Safe to call
 * with a NULL or never-registered subdev. */
void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_device *v4l2_dev;

	/* return if it isn't registered */
	if (sd == NULL || sd->v4l2_dev == NULL)
		return;

	v4l2_dev = sd->v4l2_dev;

	spin_lock(&v4l2_dev->lock);
	list_del(&sd->list);
	spin_unlock(&v4l2_dev->lock);

	if (sd->internal_ops && sd->internal_ops->unregistered)
		sd->internal_ops->unregistered(sd);
	sd->v4l2_dev = NULL;

#if defined(CONFIG_MEDIA_CONTROLLER)
	if (v4l2_dev->mdev)
		media_device_unregister_entity(&sd->entity);
#endif
	/* No-op when the subdev never had a device node. */
	video_unregister_device(sd->devnode);
	module_put(sd->owner);
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
new file mode 100644
index 000000000000..ef2a33c94045
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -0,0 +1,313 @@
1/*
2 * v4l2-event.c
3 *
4 * V4L2 events.
5 *
6 * Copyright (C) 2009--2010 Nokia Corporation.
7 *
8 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA
23 */
24
25#include <media/v4l2-dev.h>
26#include <media/v4l2-fh.h>
27#include <media/v4l2-event.h>
28
29#include <linux/sched.h>
30#include <linux/slab.h>
31#include <linux/export.h>
32
33static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
34{
35 idx += sev->first;
36 return idx >= sev->elems ? idx - sev->elems : idx;
37}
38
/*
 * Pop the oldest pending event from @fh into @event.
 * Returns -ENOENT when no event is queued.  Takes fh_lock with irqs
 * disabled since events may be queued from interrupt context.
 */
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	/* Tell userspace how many more events are still queued. */
	kev->event.pending = fh->navailable;
	*event = kev->event;
	/* Release the consumed slot back to the subscription's ring. */
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
66
/*
 * Dequeue one event for @fh, blocking until one arrives unless
 * @nonblocking is set.  The vdev serialization lock (if any) is dropped
 * while sleeping so other fops can run and actually queue events.
 */
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
				fh->navailable != 0);
		if (ret < 0)
			break;

		/* Another reader may have raced us; retry on -ENOENT. */
		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
93EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
94
95/* Caller must hold fh->vdev->fh_lock! */
96static struct v4l2_subscribed_event *v4l2_event_subscribed(
97 struct v4l2_fh *fh, u32 type, u32 id)
98{
99 struct v4l2_subscribed_event *sev;
100
101 assert_spin_locked(&fh->vdev->fh_lock);
102
103 list_for_each_entry(sev, &fh->subscribed, list)
104 if (sev->type == type && sev->id == id)
105 return sev;
106
107 return NULL;
108}
109
/*
 * Queue event @ev with timestamp @ts on a single filehandle.
 * Caller must hold fh->vdev->fh_lock.  If the subscription's ring is
 * full, the oldest event is dropped, optionally letting the driver
 * replace or merge payloads via sev->ops.
 */
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet elems will be 0, treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			/* Single-slot ring: let the driver fold the old
			   payload into the new event instead of copying. */
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
168
169void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
170{
171 struct v4l2_fh *fh;
172 unsigned long flags;
173 struct timespec timestamp;
174
175 ktime_get_ts(&timestamp);
176
177 spin_lock_irqsave(&vdev->fh_lock, flags);
178
179 list_for_each_entry(fh, &vdev->fh_list, list)
180 __v4l2_event_queue_fh(fh, ev, &timestamp);
181
182 spin_unlock_irqrestore(&vdev->fh_lock, flags);
183}
184EXPORT_SYMBOL_GPL(v4l2_event_queue);
185
186void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
187{
188 unsigned long flags;
189 struct timespec timestamp;
190
191 ktime_get_ts(&timestamp);
192
193 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
194 __v4l2_event_queue_fh(fh, ev, &timestamp);
195 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
196}
197EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
198
/* Poll helper: number of events ready for dequeueing on @fh. */
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
204
/*
 * Subscribe @fh to the event described by @sub, with room for @elems
 * queued occurrences (minimum 1).  @ops supplies optional add/del/
 * replace/merge callbacks.  Subscribing twice is a successful no-op.
 */
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	/* The ring of elems kevents is allocated inline after sev. */
	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			/* Clear ops so unsubscribe won't call ops->del. */
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
256
257void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
258{
259 struct v4l2_event_subscription sub;
260 struct v4l2_subscribed_event *sev;
261 unsigned long flags;
262
263 do {
264 sev = NULL;
265
266 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
267 if (!list_empty(&fh->subscribed)) {
268 sev = list_first_entry(&fh->subscribed,
269 struct v4l2_subscribed_event, list);
270 sub.type = sev->type;
271 sub.id = sev->id;
272 }
273 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
274 if (sev)
275 v4l2_event_unsubscribe(fh, &sub);
276 } while (sev);
277}
278EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
279
/*
 * Remove the subscription matching @sub from @fh, discarding any of its
 * still-pending events.  V4L2_EVENT_ALL unsubscribes everything.
 * Always returns 0, even if there was no such subscription.
 */
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	/* ops->del may sleep, so call it outside the spinlock. */
	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
new file mode 100644
index 000000000000..9e3fc040ea20
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -0,0 +1,120 @@
1/*
2 * v4l2-fh.c
3 *
4 * V4L2 file handles.
5 *
6 * Copyright (C) 2009--2010 Nokia Corporation.
7 *
8 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA
23 */
24
25#include <linux/bitops.h>
26#include <linux/slab.h>
27#include <linux/export.h>
28#include <media/v4l2-dev.h>
29#include <media/v4l2-fh.h>
30#include <media/v4l2-event.h>
31#include <media/v4l2-ioctl.h>
32
33void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
34{
35 fh->vdev = vdev;
36 /* Inherit from video_device. May be overridden by the driver. */
37 fh->ctrl_handler = vdev->ctrl_handler;
38 INIT_LIST_HEAD(&fh->list);
39 set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
40 fh->prio = V4L2_PRIORITY_UNSET;
41 init_waitqueue_head(&fh->wait);
42 INIT_LIST_HEAD(&fh->available);
43 INIT_LIST_HEAD(&fh->subscribed);
44 fh->sequence = -1;
45}
46EXPORT_SYMBOL_GPL(v4l2_fh_init);
47
48void v4l2_fh_add(struct v4l2_fh *fh)
49{
50 unsigned long flags;
51
52 if (test_bit(V4L2_FL_USE_FH_PRIO, &fh->vdev->flags))
53 v4l2_prio_open(fh->vdev->prio, &fh->prio);
54 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
55 list_add(&fh->list, &fh->vdev->fh_list);
56 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
57}
58EXPORT_SYMBOL_GPL(v4l2_fh_add);
59
60int v4l2_fh_open(struct file *filp)
61{
62 struct video_device *vdev = video_devdata(filp);
63 struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
64
65 filp->private_data = fh;
66 if (fh == NULL)
67 return -ENOMEM;
68 v4l2_fh_init(fh, vdev);
69 v4l2_fh_add(fh);
70 return 0;
71}
72EXPORT_SYMBOL_GPL(v4l2_fh_open);
73
74void v4l2_fh_del(struct v4l2_fh *fh)
75{
76 unsigned long flags;
77
78 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
79 list_del_init(&fh->list);
80 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
81 if (test_bit(V4L2_FL_USE_FH_PRIO, &fh->vdev->flags))
82 v4l2_prio_close(fh->vdev->prio, fh->prio);
83}
84EXPORT_SYMBOL_GPL(v4l2_fh_del);
85
86void v4l2_fh_exit(struct v4l2_fh *fh)
87{
88 if (fh->vdev == NULL)
89 return;
90 v4l2_event_unsubscribe_all(fh);
91 fh->vdev = NULL;
92}
93EXPORT_SYMBOL_GPL(v4l2_fh_exit);
94
95int v4l2_fh_release(struct file *filp)
96{
97 struct v4l2_fh *fh = filp->private_data;
98
99 if (fh) {
100 v4l2_fh_del(fh);
101 v4l2_fh_exit(fh);
102 kfree(fh);
103 }
104 return 0;
105}
106EXPORT_SYMBOL_GPL(v4l2_fh_release);
107
108int v4l2_fh_is_singular(struct v4l2_fh *fh)
109{
110 unsigned long flags;
111 int is_singular;
112
113 if (fh == NULL || fh->vdev == NULL)
114 return 0;
115 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
116 is_singular = list_is_singular(&fh->list);
117 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
118 return is_singular;
119}
120EXPORT_SYMBOL_GPL(v4l2_fh_is_singular);
diff --git a/drivers/media/v4l2-core/v4l2-int-device.c b/drivers/media/v4l2-core/v4l2-int-device.c
new file mode 100644
index 000000000000..f4473494af7a
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-int-device.c
@@ -0,0 +1,164 @@
1/*
2 * drivers/media/video/v4l2-int-device.c
3 *
4 * V4L2 internal ioctl interface.
5 *
6 * Copyright (C) 2007 Nokia Corporation.
7 *
8 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA
23 */
24
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/sort.h>
28#include <linux/string.h>
29#include <linux/module.h>
30
31#include <media/v4l2-int-device.h>
32
33static DEFINE_MUTEX(mutex);
34static LIST_HEAD(int_list);
35
36void v4l2_int_device_try_attach_all(void)
37{
38 struct v4l2_int_device *m, *s;
39
40 list_for_each_entry(m, &int_list, head) {
41 if (m->type != v4l2_int_type_master)
42 continue;
43
44 list_for_each_entry(s, &int_list, head) {
45 if (s->type != v4l2_int_type_slave)
46 continue;
47
48 /* Slave is connected? */
49 if (s->u.slave->master)
50 continue;
51
52 /* Slave wants to attach to master? */
53 if (s->u.slave->attach_to[0] != 0
54 && strncmp(m->name, s->u.slave->attach_to,
55 V4L2NAMESIZE))
56 continue;
57
58 if (!try_module_get(m->module))
59 continue;
60
61 s->u.slave->master = m;
62 if (m->u.master->attach(s)) {
63 s->u.slave->master = NULL;
64 module_put(m->module);
65 continue;
66 }
67 }
68 }
69}
70EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
71
72static int ioctl_sort_cmp(const void *a, const void *b)
73{
74 const struct v4l2_int_ioctl_desc *d1 = a, *d2 = b;
75
76 if (d1->num > d2->num)
77 return 1;
78
79 if (d1->num < d2->num)
80 return -1;
81
82 return 0;
83}
84
85int v4l2_int_device_register(struct v4l2_int_device *d)
86{
87 if (d->type == v4l2_int_type_slave)
88 sort(d->u.slave->ioctls, d->u.slave->num_ioctls,
89 sizeof(struct v4l2_int_ioctl_desc),
90 &ioctl_sort_cmp, NULL);
91 mutex_lock(&mutex);
92 list_add(&d->head, &int_list);
93 v4l2_int_device_try_attach_all();
94 mutex_unlock(&mutex);
95
96 return 0;
97}
98EXPORT_SYMBOL_GPL(v4l2_int_device_register);
99
100void v4l2_int_device_unregister(struct v4l2_int_device *d)
101{
102 mutex_lock(&mutex);
103 list_del(&d->head);
104 if (d->type == v4l2_int_type_slave
105 && d->u.slave->master != NULL) {
106 d->u.slave->master->u.master->detach(d);
107 module_put(d->u.slave->master->module);
108 d->u.slave->master = NULL;
109 }
110 mutex_unlock(&mutex);
111}
112EXPORT_SYMBOL_GPL(v4l2_int_device_unregister);
113
114/* Adapted from search_extable in extable.c. */
115static v4l2_int_ioctl_func *find_ioctl(struct v4l2_int_slave *slave, int cmd,
116 v4l2_int_ioctl_func *no_such_ioctl)
117{
118 const struct v4l2_int_ioctl_desc *first = slave->ioctls;
119 const struct v4l2_int_ioctl_desc *last =
120 first + slave->num_ioctls - 1;
121
122 while (first <= last) {
123 const struct v4l2_int_ioctl_desc *mid;
124
125 mid = (last - first) / 2 + first;
126
127 if (mid->num < cmd)
128 first = mid + 1;
129 else if (mid->num > cmd)
130 last = mid - 1;
131 else
132 return mid->func;
133 }
134
135 return no_such_ioctl;
136}
137
/* Fallback for v4l2_int_ioctl_0() when the slave lacks the ioctl. */
static int no_such_ioctl_0(struct v4l2_int_device *d)
{
	return -ENOIOCTLCMD;
}
142
143int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd)
144{
145 return ((v4l2_int_ioctl_func_0 *)
146 find_ioctl(d->u.slave, cmd,
147 (v4l2_int_ioctl_func *)no_such_ioctl_0))(d);
148}
149EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0);
150
/* Fallback for v4l2_int_ioctl_1() when the slave lacks the ioctl. */
static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg)
{
	return -ENOIOCTLCMD;
}
155
156int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg)
157{
158 return ((v4l2_int_ioctl_func_1 *)
159 find_ioctl(d->u.slave, cmd,
160 (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg);
161}
162EXPORT_SYMBOL_GPL(v4l2_int_ioctl_1);
163
164MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
new file mode 100644
index 000000000000..c3b7b5f59b32
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -0,0 +1,2324 @@
1/*
2 * Video capture interface for Linux version 2
3 *
4 * A generic framework to process V4L2 ioctl commands.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
12 * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
13 */
14
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/version.h>
20
21#include <linux/videodev2.h>
22
23#include <media/v4l2-common.h>
24#include <media/v4l2-ioctl.h>
25#include <media/v4l2-ctrls.h>
26#include <media/v4l2-fh.h>
27#include <media/v4l2-event.h>
28#include <media/v4l2-device.h>
29#include <media/v4l2-chip-ident.h>
30#include <media/videobuf2-core.h>
31
/* Zero out the end of the struct pointed to by p. Everything after, but
 * not including, the specified field is cleared.  Relies on field being a
 * direct member of *p (offsetof/typeof). */
#define CLEAR_AFTER_FIELD(p, field) \
	memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
	       0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))
37
/* Pairs a v4l2_std_id bit (or composite mask) with a printable name. */
struct std_descr {
	v4l2_std_id std;
	const char *descr;
};

/* Lookup table for v4l2_norm_to_name(); the zero-std sentinel terminates
 * the scan and its "Unknown" name is the fallback result. */
static const struct std_descr standards[] = {
	{ V4L2_STD_NTSC, 	"NTSC"      },
	{ V4L2_STD_NTSC_M, 	"NTSC-M"    },
	{ V4L2_STD_NTSC_M_JP, 	"NTSC-M-JP" },
	{ V4L2_STD_NTSC_M_KR,	"NTSC-M-KR" },
	{ V4L2_STD_NTSC_443, 	"NTSC-443"  },
	{ V4L2_STD_PAL, 	"PAL"       },
	{ V4L2_STD_PAL_BG, 	"PAL-BG"    },
	{ V4L2_STD_PAL_B, 	"PAL-B"     },
	{ V4L2_STD_PAL_B1, 	"PAL-B1"    },
	{ V4L2_STD_PAL_G, 	"PAL-G"     },
	{ V4L2_STD_PAL_H, 	"PAL-H"     },
	{ V4L2_STD_PAL_I, 	"PAL-I"     },
	{ V4L2_STD_PAL_DK, 	"PAL-DK"    },
	{ V4L2_STD_PAL_D, 	"PAL-D"     },
	{ V4L2_STD_PAL_D1, 	"PAL-D1"    },
	{ V4L2_STD_PAL_K, 	"PAL-K"     },
	{ V4L2_STD_PAL_M, 	"PAL-M"     },
	{ V4L2_STD_PAL_N, 	"PAL-N"     },
	{ V4L2_STD_PAL_Nc, 	"PAL-Nc"    },
	{ V4L2_STD_PAL_60, 	"PAL-60"    },
	{ V4L2_STD_SECAM, 	"SECAM"     },
	{ V4L2_STD_SECAM_B, 	"SECAM-B"   },
	{ V4L2_STD_SECAM_G, 	"SECAM-G"   },
	{ V4L2_STD_SECAM_H, 	"SECAM-H"   },
	{ V4L2_STD_SECAM_DK, 	"SECAM-DK"  },
	{ V4L2_STD_SECAM_D, 	"SECAM-D"   },
	{ V4L2_STD_SECAM_K, 	"SECAM-K"   },
	{ V4L2_STD_SECAM_K1, 	"SECAM-K1"  },
	{ V4L2_STD_SECAM_L, 	"SECAM-L"   },
	{ V4L2_STD_SECAM_LC, 	"SECAM-Lc"  },
	{ 0, 			"Unknown"   }
};
76
/* video4linux standard ID conversion to standard name.
 * Returns "Unknown" (the sentinel entry) when no table entry matches. */
const char *v4l2_norm_to_name(v4l2_std_id id)
{
	u32 myid = id;
	int i;

	/* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle
	   64 bit comparations. So, on that architecture, with some gcc
	   variants, compilation fails. Currently, the max value is 30bit wide.
	 */
	BUG_ON(myid != id);

	/* The { 0, "Unknown" } sentinel stops the loop, so i always
	   indexes a valid descriptor on exit. */
	for (i = 0; standards[i].std; i++)
		if (myid == standards[i].std)
			break;
	return standards[i].descr;
}
EXPORT_SYMBOL(v4l2_norm_to_name);
96
97/* Returns frame period for the given standard */
98void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod)
99{
100 if (id & V4L2_STD_525_60) {
101 frameperiod->numerator = 1001;
102 frameperiod->denominator = 30000;
103 } else {
104 frameperiod->numerator = 1;
105 frameperiod->denominator = 25;
106 }
107}
108EXPORT_SYMBOL(v4l2_video_std_frame_period);
109
/* Fill in the fields of a v4l2_standard structure according to the
   'id' and 'transmission' parameters. Returns negative on error. */
int v4l2_video_std_construct(struct v4l2_standard *vs,
			     int id, const char *name)
{
	vs->id = id;
	v4l2_video_std_frame_period(id, &vs->frameperiod);
	/* 525 lines for 60 Hz standards, 625 for 50 Hz ones. */
	vs->framelines = (id & V4L2_STD_525_60) ? 525 : 625;
	strlcpy(vs->name, name, sizeof(vs->name));
	/* Note: currently always succeeds. */
	return 0;
}
EXPORT_SYMBOL(v4l2_video_std_construct);
122
123/* ----------------------------------------------------------------- */
124/* some arrays for pretty-printing debug messages of enum types */
125
/* Printable names for enum v4l2_field, indexed by field value. */
const char *v4l2_field_names[] = {
	[V4L2_FIELD_ANY]	= "any",
	[V4L2_FIELD_NONE]	= "none",
	[V4L2_FIELD_TOP]	= "top",
	[V4L2_FIELD_BOTTOM]	= "bottom",
	[V4L2_FIELD_INTERLACED]	= "interlaced",
	[V4L2_FIELD_SEQ_TB]	= "seq-tb",
	[V4L2_FIELD_SEQ_BT]	= "seq-bt",
	[V4L2_FIELD_ALTERNATE]	= "alternate",
	[V4L2_FIELD_INTERLACED_TB] = "interlaced-tb",
	[V4L2_FIELD_INTERLACED_BT] = "interlaced-bt",
};
EXPORT_SYMBOL(v4l2_field_names);
139
/* Printable names for enum v4l2_buf_type, indexed by buffer type. */
const char *v4l2_type_names[] = {
	[V4L2_BUF_TYPE_VIDEO_CAPTURE]      = "vid-cap",
	[V4L2_BUF_TYPE_VIDEO_OVERLAY]      = "vid-overlay",
	[V4L2_BUF_TYPE_VIDEO_OUTPUT]       = "vid-out",
	[V4L2_BUF_TYPE_VBI_CAPTURE]        = "vbi-cap",
	[V4L2_BUF_TYPE_VBI_OUTPUT]         = "vbi-out",
	[V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap",
	[V4L2_BUF_TYPE_SLICED_VBI_OUTPUT]  = "sliced-vbi-out",
	[V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay",
	[V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane",
	[V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane",
};
EXPORT_SYMBOL(v4l2_type_names);
153
/* Printable names for enum v4l2_memory, indexed by memory type. */
static const char *v4l2_memory_names[] = {
	[V4L2_MEMORY_MMAP]    = "mmap",
	[V4L2_MEMORY_USERPTR] = "userptr",
	[V4L2_MEMORY_OVERLAY] = "overlay",
};
159
/* Bounds-checked lookup into one of the name tables above;
 * out-of-range values print as "unknown". */
#define prt_names(a, arr) ((((a) >= 0) && ((a) < ARRAY_SIZE(arr))) ? \
			   arr[a] : "unknown")
162
163/* ------------------------------------------------------------------ */
164/* debug help functions */
165
166static void v4l_print_querycap(const void *arg, bool write_only)
167{
168 const struct v4l2_capability *p = arg;
169
170 pr_cont("driver=%s, card=%s, bus=%s, version=0x%08x, "
171 "capabilities=0x%08x, device_caps=0x%08x\n",
172 p->driver, p->card, p->bus_info,
173 p->version, p->capabilities, p->device_caps);
174}
175
176static void v4l_print_enuminput(const void *arg, bool write_only)
177{
178 const struct v4l2_input *p = arg;
179
180 pr_cont("index=%u, name=%s, type=%u, audioset=0x%x, tuner=%u, "
181 "std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
182 p->index, p->name, p->type, p->audioset, p->tuner,
183 (unsigned long long)p->std, p->status, p->capabilities);
184}
185
186static void v4l_print_enumoutput(const void *arg, bool write_only)
187{
188 const struct v4l2_output *p = arg;
189
190 pr_cont("index=%u, name=%s, type=%u, audioset=0x%x, "
191 "modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
192 p->index, p->name, p->type, p->audioset, p->modulator,
193 (unsigned long long)p->std, p->capabilities);
194}
195
196static void v4l_print_audio(const void *arg, bool write_only)
197{
198 const struct v4l2_audio *p = arg;
199
200 if (write_only)
201 pr_cont("index=%u, mode=0x%x\n", p->index, p->mode);
202 else
203 pr_cont("index=%u, name=%s, capability=0x%x, mode=0x%x\n",
204 p->index, p->name, p->capability, p->mode);
205}
206
207static void v4l_print_audioout(const void *arg, bool write_only)
208{
209 const struct v4l2_audioout *p = arg;
210
211 if (write_only)
212 pr_cont("index=%u\n", p->index);
213 else
214 pr_cont("index=%u, name=%s, capability=0x%x, mode=0x%x\n",
215 p->index, p->name, p->capability, p->mode);
216}
217
218static void v4l_print_fmtdesc(const void *arg, bool write_only)
219{
220 const struct v4l2_fmtdesc *p = arg;
221
222 pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%c%c%c%c, description='%s'\n",
223 p->index, prt_names(p->type, v4l2_type_names),
224 p->flags, (p->pixelformat & 0xff),
225 (p->pixelformat >> 8) & 0xff,
226 (p->pixelformat >> 16) & 0xff,
227 (p->pixelformat >> 24) & 0xff,
228 p->description);
229}
230
/*
 * Debug dump for the G/S/TRY_FMT ioctls: dispatch on buffer type and
 * print the matching member of the v4l2_format union.
 */
static void v4l_print_format(const void *arg, bool write_only)
{
	const struct v4l2_format *p = arg;
	const struct v4l2_pix_format *pix;
	const struct v4l2_pix_format_mplane *mp;
	const struct v4l2_vbi_format *vbi;
	const struct v4l2_sliced_vbi_format *sliced;
	const struct v4l2_window *win;
	const struct v4l2_clip *clip;
	unsigned i;

	pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
	switch (p->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		pix = &p->fmt.pix;
		pr_cont(", width=%u, height=%u, "
			"pixelformat=%c%c%c%c, field=%s, "
			"bytesperline=%u sizeimage=%u, colorspace=%d\n",
			pix->width, pix->height,
			(pix->pixelformat & 0xff),
			(pix->pixelformat >> 8) & 0xff,
			(pix->pixelformat >> 16) & 0xff,
			(pix->pixelformat >> 24) & 0xff,
			prt_names(pix->field, v4l2_field_names),
			pix->bytesperline, pix->sizeimage,
			pix->colorspace);
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		mp = &p->fmt.pix_mp;
		pr_cont(", width=%u, height=%u, "
			"format=%c%c%c%c, field=%s, "
			"colorspace=%d, num_planes=%u\n",
			mp->width, mp->height,
			(mp->pixelformat & 0xff),
			(mp->pixelformat >> 8) & 0xff,
			(mp->pixelformat >> 16) & 0xff,
			(mp->pixelformat >> 24) & 0xff,
			prt_names(mp->field, v4l2_field_names),
			mp->colorspace, mp->num_planes);
		/* One extra line per plane (printk restarts the line). */
		for (i = 0; i < mp->num_planes; i++)
			printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
					mp->plane_fmt[i].bytesperline,
					mp->plane_fmt[i].sizeimage);
		break;
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
		win = &p->fmt.win;
		pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, "
			"chromakey=0x%08x, bitmap=%p, "
			"global_alpha=0x%02x\n",
			win->w.width, win->w.height,
			win->w.left, win->w.top,
			prt_names(win->field, v4l2_field_names),
			win->chromakey, win->bitmap, win->global_alpha);
		/* Walk the driver-supplied linked list of clip rectangles. */
		clip = win->clips;
		for (i = 0; i < win->clipcount; i++) {
			printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n",
					i, clip->c.width, clip->c.height,
					clip->c.left, clip->c.top);
			clip = clip->next;
		}
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		vbi = &p->fmt.vbi;
		pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, "
			"sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
			vbi->sampling_rate, vbi->offset,
			vbi->samples_per_line,
			(vbi->sample_format & 0xff),
			(vbi->sample_format >> 8) & 0xff,
			(vbi->sample_format >> 16) & 0xff,
			(vbi->sample_format >> 24) & 0xff,
			vbi->start[0], vbi->start[1],
			vbi->count[0], vbi->count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		sliced = &p->fmt.sliced;
		pr_cont(", service_set=0x%08x, io_size=%d\n",
				sliced->service_set, sliced->io_size);
		/* service_lines is fixed at 2 fields x 24 lines. */
		for (i = 0; i < 24; i++)
			printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
					sliced->service_lines[0][i],
					sliced->service_lines[1][i]);
		break;
	case V4L2_BUF_TYPE_PRIVATE:
		pr_cont("\n");
		break;
	}
}
324
325static void v4l_print_framebuffer(const void *arg, bool write_only)
326{
327 const struct v4l2_framebuffer *p = arg;
328
329 pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
330 "height=%u, pixelformat=%c%c%c%c, "
331 "bytesperline=%u sizeimage=%u, colorspace=%d\n",
332 p->capability, p->flags, p->base,
333 p->fmt.width, p->fmt.height,
334 (p->fmt.pixelformat & 0xff),
335 (p->fmt.pixelformat >> 8) & 0xff,
336 (p->fmt.pixelformat >> 16) & 0xff,
337 (p->fmt.pixelformat >> 24) & 0xff,
338 p->fmt.bytesperline, p->fmt.sizeimage,
339 p->fmt.colorspace);
340}
341
342static void v4l_print_buftype(const void *arg, bool write_only)
343{
344 pr_cont("type=%s\n", prt_names(*(u32 *)arg, v4l2_type_names));
345}
346
347static void v4l_print_modulator(const void *arg, bool write_only)
348{
349 const struct v4l2_modulator *p = arg;
350
351 if (write_only)
352 pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans);
353 else
354 pr_cont("index=%u, name=%s, capability=0x%x, "
355 "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
356 p->index, p->name, p->capability,
357 p->rangelow, p->rangehigh, p->txsubchans);
358}
359
360static void v4l_print_tuner(const void *arg, bool write_only)
361{
362 const struct v4l2_tuner *p = arg;
363
364 if (write_only)
365 pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
366 else
367 pr_cont("index=%u, name=%s, type=%u, capability=0x%x, "
368 "rangelow=%u, rangehigh=%u, signal=%u, afc=%d, "
369 "rxsubchans=0x%x, audmode=%u\n",
370 p->index, p->name, p->type,
371 p->capability, p->rangelow,
372 p->rangehigh, p->signal, p->afc,
373 p->rxsubchans, p->audmode);
374}
375
376static void v4l_print_frequency(const void *arg, bool write_only)
377{
378 const struct v4l2_frequency *p = arg;
379
380 pr_cont("tuner=%u, type=%u, frequency=%u\n",
381 p->tuner, p->type, p->frequency);
382}
383
384static void v4l_print_standard(const void *arg, bool write_only)
385{
386 const struct v4l2_standard *p = arg;
387
388 pr_cont("index=%u, id=0x%Lx, name=%s, fps=%u/%u, "
389 "framelines=%u\n", p->index,
390 (unsigned long long)p->id, p->name,
391 p->frameperiod.numerator,
392 p->frameperiod.denominator,
393 p->framelines);
394}
395
396static void v4l_print_std(const void *arg, bool write_only)
397{
398 pr_cont("std=0x%08Lx\n", *(const long long unsigned *)arg);
399}
400
401static void v4l_print_hw_freq_seek(const void *arg, bool write_only)
402{
403 const struct v4l2_hw_freq_seek *p = arg;
404
405 pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n",
406 p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing);
407}
408
409static void v4l_print_requestbuffers(const void *arg, bool write_only)
410{
411 const struct v4l2_requestbuffers *p = arg;
412
413 pr_cont("count=%d, type=%s, memory=%s\n",
414 p->count,
415 prt_names(p->type, v4l2_type_names),
416 prt_names(p->memory, v4l2_memory_names));
417}
418
/*
 * Debug dump for the QBUF/DQBUF/QUERYBUF family: header line, then for
 * multiplanar buffers one line per plane, and finally the timecode.
 */
static void v4l_print_buffer(const void *arg, bool write_only)
{
	const struct v4l2_buffer *p = arg;
	const struct v4l2_timecode *tc = &p->timecode;
	const struct v4l2_plane *plane;
	int i;

	/* Timestamp is rendered as H:MM:SS.usec. */
	pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, "
		"flags=0x%08x, field=%s, sequence=%d, memory=%s",
			p->timestamp.tv_sec / 3600,
			(int)(p->timestamp.tv_sec / 60) % 60,
			(int)(p->timestamp.tv_sec % 60),
			(long)p->timestamp.tv_usec,
			p->index,
			prt_names(p->type, v4l2_type_names),
			p->flags, prt_names(p->field, v4l2_field_names),
			p->sequence, prt_names(p->memory, v4l2_memory_names));

	if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) {
		pr_cont("\n");
		/* For multiplanar buffers, length is the plane count. */
		for (i = 0; i < p->length; ++i) {
			plane = &p->m.planes[i];
			printk(KERN_DEBUG
				"plane %d: bytesused=%d, data_offset=0x%08x "
				"offset/userptr=0x%lx, length=%d\n",
				i, plane->bytesused, plane->data_offset,
				plane->m.userptr, plane->length);
		}
	} else {
		pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n",
			p->bytesused, p->m.userptr, p->length);
	}

	printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, "
		"flags=0x%08x, frames=%d, userbits=0x%08x\n",
			tc->hours, tc->minutes, tc->seconds,
			tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
}
457
458static void v4l_print_create_buffers(const void *arg, bool write_only)
459{
460 const struct v4l2_create_buffers *p = arg;
461
462 pr_cont("index=%d, count=%d, memory=%s, ",
463 p->index, p->count,
464 prt_names(p->memory, v4l2_memory_names));
465 v4l_print_format(&p->format, write_only);
466}
467
/*
 * Log a struct v4l2_streamparm: the capture or output member of the
 * parm union, selected by buffer type.
 */
static void v4l_print_streamparm(const void *arg, bool write_only)
{
	const struct v4l2_streamparm *p = arg;

	pr_cont("type=%s", prt_names(p->type, v4l2_type_names));

	if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		const struct v4l2_captureparm *c = &p->parm.capture;

		pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, "
			"extendedmode=%d, readbuffers=%d\n",
			c->capability, c->capturemode,
			c->timeperframe.numerator, c->timeperframe.denominator,
			c->extendedmode, c->readbuffers);
	} else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		   p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		const struct v4l2_outputparm *c = &p->parm.output;

		pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, "
			"extendedmode=%d, writebuffers=%d\n",
			c->capability, c->outputmode,
			c->timeperframe.numerator, c->timeperframe.denominator,
			c->extendedmode, c->writebuffers);
	}
	/* NOTE(review): other buffer types leave the line without a
	 * trailing newline — confirm whether that is intentional. */
}
494
495static void v4l_print_queryctrl(const void *arg, bool write_only)
496{
497 const struct v4l2_queryctrl *p = arg;
498
499 pr_cont("id=0x%x, type=%d, name=%s, min/max=%d/%d, "
500 "step=%d, default=%d, flags=0x%08x\n",
501 p->id, p->type, p->name,
502 p->minimum, p->maximum,
503 p->step, p->default_value, p->flags);
504}
505
506static void v4l_print_querymenu(const void *arg, bool write_only)
507{
508 const struct v4l2_querymenu *p = arg;
509
510 pr_cont("id=0x%x, index=%d\n", p->id, p->index);
511}
512
513static void v4l_print_control(const void *arg, bool write_only)
514{
515 const struct v4l2_control *p = arg;
516
517 pr_cont("id=0x%x, value=%d\n", p->id, p->value);
518}
519
520static void v4l_print_ext_controls(const void *arg, bool write_only)
521{
522 const struct v4l2_ext_controls *p = arg;
523 int i;
524
525 pr_cont("class=0x%x, count=%d, error_idx=%d",
526 p->ctrl_class, p->count, p->error_idx);
527 for (i = 0; i < p->count; i++) {
528 if (p->controls[i].size)
529 pr_cont(", id/val=0x%x/0x%x",
530 p->controls[i].id, p->controls[i].value);
531 else
532 pr_cont(", id/size=0x%x/%u",
533 p->controls[i].id, p->controls[i].size);
534 }
535 pr_cont("\n");
536}
537
538static void v4l_print_cropcap(const void *arg, bool write_only)
539{
540 const struct v4l2_cropcap *p = arg;
541
542 pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, "
543 "defrect wxh=%dx%d, x,y=%d,%d\n, "
544 "pixelaspect %d/%d\n",
545 prt_names(p->type, v4l2_type_names),
546 p->bounds.width, p->bounds.height,
547 p->bounds.left, p->bounds.top,
548 p->defrect.width, p->defrect.height,
549 p->defrect.left, p->defrect.top,
550 p->pixelaspect.numerator, p->pixelaspect.denominator);
551}
552
553static void v4l_print_crop(const void *arg, bool write_only)
554{
555 const struct v4l2_crop *p = arg;
556
557 pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n",
558 prt_names(p->type, v4l2_type_names),
559 p->c.width, p->c.height,
560 p->c.left, p->c.top);
561}
562
563static void v4l_print_selection(const void *arg, bool write_only)
564{
565 const struct v4l2_selection *p = arg;
566
567 pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n",
568 prt_names(p->type, v4l2_type_names),
569 p->target, p->flags,
570 p->r.width, p->r.height, p->r.left, p->r.top);
571}
572
573static void v4l_print_jpegcompression(const void *arg, bool write_only)
574{
575 const struct v4l2_jpegcompression *p = arg;
576
577 pr_cont("quality=%d, APPn=%d, APP_len=%d, "
578 "COM_len=%d, jpeg_markers=0x%x\n",
579 p->quality, p->APPn, p->APP_len,
580 p->COM_len, p->jpeg_markers);
581}
582
583static void v4l_print_enc_idx(const void *arg, bool write_only)
584{
585 const struct v4l2_enc_idx *p = arg;
586
587 pr_cont("entries=%d, entries_cap=%d\n",
588 p->entries, p->entries_cap);
589}
590
591static void v4l_print_encoder_cmd(const void *arg, bool write_only)
592{
593 const struct v4l2_encoder_cmd *p = arg;
594
595 pr_cont("cmd=%d, flags=0x%x\n",
596 p->cmd, p->flags);
597}
598
/*
 * Log a struct v4l2_decoder_cmd; START and STOP payloads go out as
 * separate pr_info lines.
 */
static void v4l_print_decoder_cmd(const void *arg, bool write_only)
{
	const struct v4l2_decoder_cmd *p = arg;

	pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags);

	if (p->cmd == V4L2_DEC_CMD_START)
		pr_info("speed=%d, format=%u\n",
				p->start.speed, p->start.format);
	else if (p->cmd == V4L2_DEC_CMD_STOP)
		pr_info("pts=%llu\n", p->stop.pts);
}
611
612static void v4l_print_dbg_chip_ident(const void *arg, bool write_only)
613{
614 const struct v4l2_dbg_chip_ident *p = arg;
615
616 pr_cont("type=%u, ", p->match.type);
617 if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
618 pr_cont("name=%s, ", p->match.name);
619 else
620 pr_cont("addr=%u, ", p->match.addr);
621 pr_cont("chip_ident=%u, revision=0x%x\n",
622 p->ident, p->revision);
623}
624
625static void v4l_print_dbg_register(const void *arg, bool write_only)
626{
627 const struct v4l2_dbg_register *p = arg;
628
629 pr_cont("type=%u, ", p->match.type);
630 if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
631 pr_cont("name=%s, ", p->match.name);
632 else
633 pr_cont("addr=%u, ", p->match.addr);
634 pr_cont("reg=0x%llx, val=0x%llx\n",
635 p->reg, p->val);
636}
637
638static void v4l_print_dv_enum_presets(const void *arg, bool write_only)
639{
640 const struct v4l2_dv_enum_preset *p = arg;
641
642 pr_cont("index=%u, preset=%u, name=%s, width=%u, height=%u\n",
643 p->index, p->preset, p->name, p->width, p->height);
644}
645
646static void v4l_print_dv_preset(const void *arg, bool write_only)
647{
648 const struct v4l2_dv_preset *p = arg;
649
650 pr_cont("preset=%u\n", p->preset);
651}
652
/*
 * Log a struct v4l2_dv_timings.  Only BT.656/1120 timings have their
 * fields decoded; any other type just prints its numeric value.
 */
static void v4l_print_dv_timings(const void *arg, bool write_only)
{
	const struct v4l2_dv_timings *p = arg;

	switch (p->type) {
	case V4L2_DV_BT_656_1120:
		pr_cont("type=bt-656/1120, interlaced=%u, "
			"pixelclock=%llu, "
			"width=%u, height=%u, polarities=0x%x, "
			"hfrontporch=%u, hsync=%u, "
			"hbackporch=%u, vfrontporch=%u, "
			"vsync=%u, vbackporch=%u, "
			"il_vfrontporch=%u, il_vsync=%u, "
			"il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
			p->bt.interlaced, p->bt.pixelclock,
			p->bt.width, p->bt.height,
			p->bt.polarities, p->bt.hfrontporch,
			p->bt.hsync, p->bt.hbackporch,
			p->bt.vfrontporch, p->bt.vsync,
			p->bt.vbackporch, p->bt.il_vfrontporch,
			p->bt.il_vsync, p->bt.il_vbackporch,
			p->bt.standards, p->bt.flags);
		break;
	default:
		pr_cont("type=%d\n", p->type);
		break;
	}
}
681
682static void v4l_print_enum_dv_timings(const void *arg, bool write_only)
683{
684 const struct v4l2_enum_dv_timings *p = arg;
685
686 pr_cont("index=%u, ", p->index);
687 v4l_print_dv_timings(&p->timings, write_only);
688}
689
/*
 * Log a struct v4l2_dv_timings_cap.  Only BT.656/1120 capabilities are
 * decoded; other types just print their numeric value.
 */
static void v4l_print_dv_timings_cap(const void *arg, bool write_only)
{
	const struct v4l2_dv_timings_cap *p = arg;

	switch (p->type) {
	case V4L2_DV_BT_656_1120:
		pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, "
			"pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
			p->bt.min_width, p->bt.max_width,
			p->bt.min_height, p->bt.max_height,
			p->bt.min_pixelclock, p->bt.max_pixelclock,
			p->bt.standards, p->bt.capabilities);
		break;
	default:
		pr_cont("type=%u\n", p->type);
		break;
	}
}
708
709static void v4l_print_frmsizeenum(const void *arg, bool write_only)
710{
711 const struct v4l2_frmsizeenum *p = arg;
712
713 pr_cont("index=%u, pixelformat=%c%c%c%c, type=%u",
714 p->index,
715 (p->pixel_format & 0xff),
716 (p->pixel_format >> 8) & 0xff,
717 (p->pixel_format >> 16) & 0xff,
718 (p->pixel_format >> 24) & 0xff,
719 p->type);
720 switch (p->type) {
721 case V4L2_FRMSIZE_TYPE_DISCRETE:
722 pr_cont(" wxh=%ux%u\n",
723 p->discrete.width, p->discrete.height);
724 break;
725 case V4L2_FRMSIZE_TYPE_STEPWISE:
726 pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n",
727 p->stepwise.min_width, p->stepwise.min_height,
728 p->stepwise.step_width, p->stepwise.step_height,
729 p->stepwise.max_width, p->stepwise.max_height);
730 break;
731 case V4L2_FRMSIZE_TYPE_CONTINUOUS:
732 /* fall through */
733 default:
734 pr_cont("\n");
735 break;
736 }
737}
738
/*
 * Log a struct v4l2_frmivalenum: index, fourcc, frame size and type,
 * then the discrete or stepwise frame-interval information.
 */
static void v4l_print_frmivalenum(const void *arg, bool write_only)
{
	const struct v4l2_frmivalenum *p = arg;

	pr_cont("index=%u, pixelformat=%c%c%c%c, wxh=%ux%u, type=%u",
			p->index,
			(p->pixel_format & 0xff),
			(p->pixel_format >> 8) & 0xff,
			(p->pixel_format >> 16) & 0xff,
			(p->pixel_format >> 24) & 0xff,
			p->width, p->height, p->type);
	switch (p->type) {
	case V4L2_FRMIVAL_TYPE_DISCRETE:
		pr_cont(" fps=%d/%d\n",
				p->discrete.numerator,
				p->discrete.denominator);
		break;
	case V4L2_FRMIVAL_TYPE_STEPWISE:
		pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n",
				p->stepwise.min.numerator,
				p->stepwise.min.denominator,
				p->stepwise.max.numerator,
				p->stepwise.max.denominator,
				p->stepwise.step.numerator,
				p->stepwise.step.denominator);
		break;
	case V4L2_FRMIVAL_TYPE_CONTINUOUS:
		/* fall through */
	default:
		pr_cont("\n");
		break;
	}
}
772
/*
 * Log a struct v4l2_event: the common header, then the payload for the
 * event types this file knows how to decode (VSYNC, CTRL, FRAME_SYNC).
 */
static void v4l_print_event(const void *arg, bool write_only)
{
	const struct v4l2_event *p = arg;
	const struct v4l2_event_ctrl *c;

	pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, "
		"timestamp=%lu.%9.9lu\n",
			p->type, p->pending, p->sequence, p->id,
			p->timestamp.tv_sec, p->timestamp.tv_nsec);
	switch (p->type) {
	case V4L2_EVENT_VSYNC:
		printk(KERN_DEBUG "field=%s\n",
			prt_names(p->u.vsync.field, v4l2_field_names));
		break;
	case V4L2_EVENT_CTRL:
		c = &p->u.ctrl;
		printk(KERN_DEBUG "changes=0x%x, type=%u, ",
			c->changes, c->type);
		/* 64-bit controls store their value in a different member */
		if (c->type == V4L2_CTRL_TYPE_INTEGER64)
			pr_cont("value64=%lld, ", c->value64);
		else
			pr_cont("value=%d, ", c->value);
		pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d,"
			" default_value=%d\n",
			c->flags, c->minimum, c->maximum,
			c->step, c->default_value);
		break;
	case V4L2_EVENT_FRAME_SYNC:
		pr_cont("frame_sequence=%u\n",
			p->u.frame_sync.frame_sequence);
		break;
	}
}
806
807static void v4l_print_event_subscription(const void *arg, bool write_only)
808{
809 const struct v4l2_event_subscription *p = arg;
810
811 pr_cont("type=0x%x, id=0x%x, flags=0x%x\n",
812 p->type, p->id, p->flags);
813}
814
/*
 * Log a struct v4l2_sliced_vbi_cap: the service set, then the per-line
 * service masks of both fields side by side.
 */
static void v4l_print_sliced_vbi_cap(const void *arg, bool write_only)
{
	const struct v4l2_sliced_vbi_cap *p = arg;
	int i;

	pr_cont("type=%s, service_set=0x%08x\n",
			prt_names(p->type, v4l2_type_names), p->service_set);
	/* service_lines is declared [2][24]; 24 is the per-field line count */
	for (i = 0; i < 24; i++)
		printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
				p->service_lines[0][i],
				p->service_lines[1][i]);
}
827
828static void v4l_print_freq_band(const void *arg, bool write_only)
829{
830 const struct v4l2_frequency_band *p = arg;
831
832 pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
833 "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
834 p->tuner, p->type, p->index,
835 p->capability, p->rangelow,
836 p->rangehigh, p->modulation);
837}
838
839static void v4l_print_u32(const void *arg, bool write_only)
840{
841 pr_cont("value=%u\n", *(const u32 *)arg);
842}
843
/* Terminate the debug line for ioctls whose payload is not printed. */
static void v4l_print_newline(const void *arg, bool write_only)
{
	pr_cont("\n");
}
848
/* Fallback printer for ioctls this file has no decoder for. */
static void v4l_print_default(const void *arg, bool write_only)
{
	pr_cont("driver-specific ioctl\n");
}
853
/*
 * Validate and sanitize a v4l2_ext_controls request.
 *
 * Zeroes the reserved fields, then verifies that every control id in the
 * array belongs to the request's control class.  Returns 1 when the
 * request is usable and 0 otherwise; on a class mismatch error_idx is
 * set to the offending control's index.
 */
static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
{
	__u32 i;

	/* zero the reserved fields */
	c->reserved[0] = c->reserved[1] = 0;
	for (i = 0; i < c->count; i++)
		c->controls[i].reserved2[0] = 0;

	/* V4L2_CID_PRIVATE_BASE cannot be used as control class
	   when using extended controls.
	   Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
	   is it allowed for backwards compatibility.
	 */
	if (!allow_priv && c->ctrl_class == V4L2_CID_PRIVATE_BASE)
		return 0;
	/* Check that all controls are from the same control class. */
	for (i = 0; i < c->count; i++) {
		if (V4L2_CTRL_ID2CLASS(c->controls[i].id) != c->ctrl_class) {
			c->error_idx = i;
			return 0;
		}
	}
	return 1;
}
879
/*
 * Return 0 when the driver provides a g_fmt handler for the given buffer
 * type, -EINVAL otherwise.  A single-planar video type is also satisfied
 * by the corresponding multi-planar handler.
 */
static int check_fmt(const struct v4l2_ioctl_ops *ops, enum v4l2_buf_type type)
{
	if (ops == NULL)
		return -EINVAL;

	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		if (ops->vidioc_g_fmt_vid_cap ||
				ops->vidioc_g_fmt_vid_cap_mplane)
			return 0;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
		if (ops->vidioc_g_fmt_vid_cap_mplane)
			return 0;
		break;
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
		if (ops->vidioc_g_fmt_vid_overlay)
			return 0;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		if (ops->vidioc_g_fmt_vid_out ||
				ops->vidioc_g_fmt_vid_out_mplane)
			return 0;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		if (ops->vidioc_g_fmt_vid_out_mplane)
			return 0;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
		if (ops->vidioc_g_fmt_vid_out_overlay)
			return 0;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
		if (ops->vidioc_g_fmt_vbi_cap)
			return 0;
		break;
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		if (ops->vidioc_g_fmt_vbi_out)
			return 0;
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
		if (ops->vidioc_g_fmt_sliced_vbi_cap)
			return 0;
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		if (ops->vidioc_g_fmt_sliced_vbi_out)
			return 0;
		break;
	case V4L2_BUF_TYPE_PRIVATE:
		if (ops->vidioc_g_fmt_type_private)
			return 0;
		break;
	}
	return -EINVAL;
}
935
936static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
937 struct file *file, void *fh, void *arg)
938{
939 struct v4l2_capability *cap = (struct v4l2_capability *)arg;
940
941 cap->version = LINUX_VERSION_CODE;
942 return ops->vidioc_querycap(file, fh, cap);
943}
944
945static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
946 struct file *file, void *fh, void *arg)
947{
948 return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
949}
950
951static int v4l_s_output(const struct v4l2_ioctl_ops *ops,
952 struct file *file, void *fh, void *arg)
953{
954 return ops->vidioc_s_output(file, fh, *(unsigned int *)arg);
955}
956
957static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
958 struct file *file, void *fh, void *arg)
959{
960 struct video_device *vfd;
961 u32 *p = arg;
962
963 if (ops->vidioc_g_priority)
964 return ops->vidioc_g_priority(file, fh, arg);
965 vfd = video_devdata(file);
966 *p = v4l2_prio_max(&vfd->v4l2_dev->prio);
967 return 0;
968}
969
970static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
971 struct file *file, void *fh, void *arg)
972{
973 struct video_device *vfd;
974 struct v4l2_fh *vfh;
975 u32 *p = arg;
976
977 if (ops->vidioc_s_priority)
978 return ops->vidioc_s_priority(file, fh, *p);
979 vfd = video_devdata(file);
980 vfh = file->private_data;
981 return v4l2_prio_change(&vfd->v4l2_dev->prio, &vfh->prio, *p);
982}
983
984static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
985 struct file *file, void *fh, void *arg)
986{
987 struct v4l2_input *p = arg;
988
989 /*
990 * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
991 * CAP_STD here based on ioctl handler provided by the
992 * driver. If the driver doesn't support these
993 * for a specific input, it must override these flags.
994 */
995 if (ops->vidioc_s_std)
996 p->capabilities |= V4L2_IN_CAP_STD;
997 if (ops->vidioc_s_dv_preset)
998 p->capabilities |= V4L2_IN_CAP_PRESETS;
999 if (ops->vidioc_s_dv_timings)
1000 p->capabilities |= V4L2_IN_CAP_CUSTOM_TIMINGS;
1001
1002 return ops->vidioc_enum_input(file, fh, p);
1003}
1004
1005static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
1006 struct file *file, void *fh, void *arg)
1007{
1008 struct v4l2_output *p = arg;
1009
1010 /*
1011 * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
1012 * CAP_STD here based on ioctl handler provided by the
1013 * driver. If the driver doesn't support these
1014 * for a specific output, it must override these flags.
1015 */
1016 if (ops->vidioc_s_std)
1017 p->capabilities |= V4L2_OUT_CAP_STD;
1018 if (ops->vidioc_s_dv_preset)
1019 p->capabilities |= V4L2_OUT_CAP_PRESETS;
1020 if (ops->vidioc_s_dv_timings)
1021 p->capabilities |= V4L2_OUT_CAP_CUSTOM_TIMINGS;
1022
1023 return ops->vidioc_enum_output(file, fh, p);
1024}
1025
/*
 * VIDIOC_ENUM_FMT: dispatch to the type-specific driver handler;
 * -EINVAL when the driver supports no such buffer type.
 */
static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct v4l2_fmtdesc *p = arg;

	switch (p->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		if (unlikely(!ops->vidioc_enum_fmt_vid_cap))
			break;
		return ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
		if (unlikely(!ops->vidioc_enum_fmt_vid_cap_mplane))
			break;
		return ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
		if (unlikely(!ops->vidioc_enum_fmt_vid_overlay))
			break;
		return ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		if (unlikely(!ops->vidioc_enum_fmt_vid_out))
			break;
		return ops->vidioc_enum_fmt_vid_out(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		if (unlikely(!ops->vidioc_enum_fmt_vid_out_mplane))
			break;
		return ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg);
	case V4L2_BUF_TYPE_PRIVATE:
		if (unlikely(!ops->vidioc_enum_fmt_type_private))
			break;
		return ops->vidioc_enum_fmt_type_private(file, fh, arg);
	}
	return -EINVAL;
}
1059
/*
 * VIDIOC_G_FMT: dispatch to the type-specific driver handler;
 * -EINVAL when the driver supports no such buffer type.
 */
static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct v4l2_format *p = arg;

	switch (p->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		if (unlikely(!ops->vidioc_g_fmt_vid_cap))
			break;
		return ops->vidioc_g_fmt_vid_cap(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
		if (unlikely(!ops->vidioc_g_fmt_vid_cap_mplane))
			break;
		return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
		if (unlikely(!ops->vidioc_g_fmt_vid_overlay))
			break;
		return ops->vidioc_g_fmt_vid_overlay(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		if (unlikely(!ops->vidioc_g_fmt_vid_out))
			break;
		return ops->vidioc_g_fmt_vid_out(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		if (unlikely(!ops->vidioc_g_fmt_vid_out_mplane))
			break;
		return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
		if (unlikely(!ops->vidioc_g_fmt_vid_out_overlay))
			break;
		return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg);
	case V4L2_BUF_TYPE_VBI_CAPTURE:
		if (unlikely(!ops->vidioc_g_fmt_vbi_cap))
			break;
		return ops->vidioc_g_fmt_vbi_cap(file, fh, arg);
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		if (unlikely(!ops->vidioc_g_fmt_vbi_out))
			break;
		return ops->vidioc_g_fmt_vbi_out(file, fh, arg);
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
		if (unlikely(!ops->vidioc_g_fmt_sliced_vbi_cap))
			break;
		return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg);
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		if (unlikely(!ops->vidioc_g_fmt_sliced_vbi_out))
			break;
		return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
	case V4L2_BUF_TYPE_PRIVATE:
		if (unlikely(!ops->vidioc_g_fmt_type_private))
			break;
		return ops->vidioc_g_fmt_type_private(file, fh, arg);
	}
	return -EINVAL;
}
1113
/*
 * VIDIOC_S_FMT: dispatch to the type-specific driver handler.  Before
 * calling it, CLEAR_AFTER_FIELD zeroes everything past the fmt union
 * member in use so stale userspace data is not passed to the driver
 * (V4L2_BUF_TYPE_PRIVATE formats are passed through untouched).
 */
static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct v4l2_format *p = arg;

	switch (p->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		if (unlikely(!ops->vidioc_s_fmt_vid_cap))
			break;
		CLEAR_AFTER_FIELD(p, fmt.pix);
		return ops->vidioc_s_fmt_vid_cap(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
		if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
			break;
		CLEAR_AFTER_FIELD(p, fmt.pix_mp);
		return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
		if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
			break;
		CLEAR_AFTER_FIELD(p, fmt.win);
		return ops->vidioc_s_fmt_vid_overlay(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		if (unlikely(!ops->vidioc_s_fmt_vid_out))
			break;
		CLEAR_AFTER_FIELD(p, fmt.pix);
		return ops->vidioc_s_fmt_vid_out(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
			break;
		CLEAR_AFTER_FIELD(p, fmt.pix_mp);
		return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
		if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
			break;
		CLEAR_AFTER_FIELD(p, fmt.win);
		return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg);
	case V4L2_BUF_TYPE_VBI_CAPTURE:
		if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
			break;
		CLEAR_AFTER_FIELD(p, fmt.vbi);
		return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		if (unlikely(!ops->vidioc_s_fmt_vbi_out))
			break;
		CLEAR_AFTER_FIELD(p, fmt.vbi);
		return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
		if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
			break;
		CLEAR_AFTER_FIELD(p, fmt.sliced);
		return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
			break;
		CLEAR_AFTER_FIELD(p, fmt.sliced);
		return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
	case V4L2_BUF_TYPE_PRIVATE:
		if (unlikely(!ops->vidioc_s_fmt_type_private))
			break;
		return ops->vidioc_s_fmt_type_private(file, fh, arg);
	}
	return -EINVAL;
}
1177
/*
 * VIDIOC_TRY_FMT: dispatch to the type-specific driver handler.  As in
 * v4l_s_fmt, CLEAR_AFTER_FIELD zeroes everything past the fmt union
 * member in use before the driver sees it (V4L2_BUF_TYPE_PRIVATE
 * formats are passed through untouched).
 */
static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct v4l2_format *p = arg;

	switch (p->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		if (unlikely(!ops->vidioc_try_fmt_vid_cap))
			break;
		CLEAR_AFTER_FIELD(p, fmt.pix);
		return ops->vidioc_try_fmt_vid_cap(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
		if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
			break;
		CLEAR_AFTER_FIELD(p, fmt.pix_mp);
		return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
		if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
			break;
		CLEAR_AFTER_FIELD(p, fmt.win);
		return ops->vidioc_try_fmt_vid_overlay(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		if (unlikely(!ops->vidioc_try_fmt_vid_out))
			break;
		CLEAR_AFTER_FIELD(p, fmt.pix);
		return ops->vidioc_try_fmt_vid_out(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
			break;
		CLEAR_AFTER_FIELD(p, fmt.pix_mp);
		return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
		if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
			break;
		CLEAR_AFTER_FIELD(p, fmt.win);
		return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg);
	case V4L2_BUF_TYPE_VBI_CAPTURE:
		if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
			break;
		CLEAR_AFTER_FIELD(p, fmt.vbi);
		return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		if (unlikely(!ops->vidioc_try_fmt_vbi_out))
			break;
		CLEAR_AFTER_FIELD(p, fmt.vbi);
		return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
		if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
			break;
		CLEAR_AFTER_FIELD(p, fmt.sliced);
		return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
			break;
		CLEAR_AFTER_FIELD(p, fmt.sliced);
		return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
	case V4L2_BUF_TYPE_PRIVATE:
		if (unlikely(!ops->vidioc_try_fmt_type_private))
			break;
		return ops->vidioc_try_fmt_type_private(file, fh, arg);
	}
	return -EINVAL;
}
1241
1242static int v4l_streamon(const struct v4l2_ioctl_ops *ops,
1243 struct file *file, void *fh, void *arg)
1244{
1245 return ops->vidioc_streamon(file, fh, *(unsigned int *)arg);
1246}
1247
1248static int v4l_streamoff(const struct v4l2_ioctl_ops *ops,
1249 struct file *file, void *fh, void *arg)
1250{
1251 return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg);
1252}
1253
1254static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
1255 struct file *file, void *fh, void *arg)
1256{
1257 struct video_device *vfd = video_devdata(file);
1258 struct v4l2_tuner *p = arg;
1259 int err;
1260
1261 p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
1262 V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
1263 err = ops->vidioc_g_tuner(file, fh, p);
1264 if (!err)
1265 p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
1266 return err;
1267}
1268
1269static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
1270 struct file *file, void *fh, void *arg)
1271{
1272 struct video_device *vfd = video_devdata(file);
1273 struct v4l2_tuner *p = arg;
1274
1275 p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
1276 V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
1277 return ops->vidioc_s_tuner(file, fh, p);
1278}
1279
1280static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
1281 struct file *file, void *fh, void *arg)
1282{
1283 struct v4l2_modulator *p = arg;
1284 int err;
1285
1286 err = ops->vidioc_g_modulator(file, fh, p);
1287 if (!err)
1288 p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
1289 return err;
1290}
1291
1292static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
1293 struct file *file, void *fh, void *arg)
1294{
1295 struct video_device *vfd = video_devdata(file);
1296 struct v4l2_frequency *p = arg;
1297
1298 p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
1299 V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
1300 return ops->vidioc_g_frequency(file, fh, p);
1301}
1302
1303static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
1304 struct file *file, void *fh, void *arg)
1305{
1306 struct video_device *vfd = video_devdata(file);
1307 struct v4l2_frequency *p = arg;
1308 enum v4l2_tuner_type type;
1309
1310 type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
1311 V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
1312 if (p->type != type)
1313 return -EINVAL;
1314 return ops->vidioc_s_frequency(file, fh, p);
1315}
1316
/*
 * VIDIOC_ENUMSTD: enumerate the device's supported standards in a
 * canonical order built from the 'standards' table.  Composite
 * standards (PAL/SECAM/NTSC) are kept in the mask so their individual
 * variants are also reported; everything else is cleared once emitted.
 * Returns -EINVAL when the requested index is past the last standard.
 */
static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_standard *p = arg;
	v4l2_std_id id = vfd->tvnorms, curr_id = 0;
	unsigned int index = p->index, i, j = 0;
	const char *descr = "";

	/* Return norm array in a canonical way */
	for (i = 0; i <= index && id; i++) {
		/* last std value in the standards array is 0, so this
		   while always ends there since (id & 0) == 0. */
		while ((id & standards[j].std) != standards[j].std)
			j++;
		curr_id = standards[j].std;
		descr = standards[j].descr;
		j++;
		if (curr_id == 0)
			break;
		if (curr_id != V4L2_STD_PAL &&
				curr_id != V4L2_STD_SECAM &&
				curr_id != V4L2_STD_NTSC)
			id &= ~curr_id;
	}
	if (i <= index)
		return -EINVAL;

	v4l2_video_std_construct(p, curr_id, descr);
	return 0;
}
1348
1349static int v4l_g_std(const struct v4l2_ioctl_ops *ops,
1350 struct file *file, void *fh, void *arg)
1351{
1352 struct video_device *vfd = video_devdata(file);
1353 v4l2_std_id *id = arg;
1354
1355 /* Calls the specific handler */
1356 if (ops->vidioc_g_std)
1357 return ops->vidioc_g_std(file, fh, arg);
1358 if (vfd->current_norm) {
1359 *id = vfd->current_norm;
1360 return 0;
1361 }
1362 return -ENOTTY;
1363}
1364
/*
 * VIDIOC_S_STD: mask the requested standard against what the device
 * supports, hand it to the driver, and on success cache it as the
 * device's current norm.
 */
static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	v4l2_std_id *id = arg, norm;
	int ret;

	norm = (*id) & vfd->tvnorms;
	if (vfd->tvnorms && !norm)	/* Check if std is supported */
		return -EINVAL;

	/* Calls the specific handler */
	ret = ops->vidioc_s_std(file, fh, &norm);

	/* Updates standard information */
	if (ret >= 0)
		vfd->current_norm = norm;
	return ret;
}
1384
/*
 * VIDIOC_QUERYSTD: seed the result with every supported standard, then
 * let the driver chain narrow it down.
 */
static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	v4l2_std_id *p = arg;

	/*
	 * If nothing detected, it should return all supported
	 * standard.
	 * Drivers just need to mask the std argument, in order
	 * to remove the standards that don't apply from the mask.
	 * This means that tuners, audio and video decoders can join
	 * their efforts to improve the standards detection.
	 */
	*p = vfd->tvnorms;
	return ops->vidioc_querystd(file, fh, arg);
}
1402
1403static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
1404 struct file *file, void *fh, void *arg)
1405{
1406 struct video_device *vfd = video_devdata(file);
1407 struct v4l2_hw_freq_seek *p = arg;
1408 enum v4l2_tuner_type type;
1409
1410 type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
1411 V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
1412 if (p->type != type)
1413 return -EINVAL;
1414 return ops->vidioc_s_hw_freq_seek(file, fh, p);
1415}
1416
1417static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
1418 struct file *file, void *fh, void *arg)
1419{
1420 struct v4l2_requestbuffers *p = arg;
1421 int ret = check_fmt(ops, p->type);
1422
1423 if (ret)
1424 return ret;
1425
1426 if (p->type < V4L2_BUF_TYPE_PRIVATE)
1427 CLEAR_AFTER_FIELD(p, memory);
1428
1429 return ops->vidioc_reqbufs(file, fh, p);
1430}
1431
1432static int v4l_querybuf(const struct v4l2_ioctl_ops *ops,
1433 struct file *file, void *fh, void *arg)
1434{
1435 struct v4l2_buffer *p = arg;
1436 int ret = check_fmt(ops, p->type);
1437
1438 return ret ? ret : ops->vidioc_querybuf(file, fh, p);
1439}
1440
1441static int v4l_qbuf(const struct v4l2_ioctl_ops *ops,
1442 struct file *file, void *fh, void *arg)
1443{
1444 struct v4l2_buffer *p = arg;
1445 int ret = check_fmt(ops, p->type);
1446
1447 return ret ? ret : ops->vidioc_qbuf(file, fh, p);
1448}
1449
1450static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops,
1451 struct file *file, void *fh, void *arg)
1452{
1453 struct v4l2_buffer *p = arg;
1454 int ret = check_fmt(ops, p->type);
1455
1456 return ret ? ret : ops->vidioc_dqbuf(file, fh, p);
1457}
1458
1459static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
1460 struct file *file, void *fh, void *arg)
1461{
1462 struct v4l2_create_buffers *create = arg;
1463 int ret = check_fmt(ops, create->format.type);
1464
1465 return ret ? ret : ops->vidioc_create_bufs(file, fh, create);
1466}
1467
1468static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
1469 struct file *file, void *fh, void *arg)
1470{
1471 struct v4l2_buffer *b = arg;
1472 int ret = check_fmt(ops, b->type);
1473
1474 return ret ? ret : ops->vidioc_prepare_buf(file, fh, b);
1475}
1476
/*
 * VIDIOC_G_PARM: use the driver handler when present.  Otherwise
 * emulate it for capture types only: report 2 readbuffers and derive
 * the frame period from the current (or driver-queried) video standard.
 */
static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_streamparm *p = arg;
	v4l2_std_id std;
	int ret = check_fmt(ops, p->type);

	if (ret)
		return ret;
	if (ops->vidioc_g_parm)
		return ops->vidioc_g_parm(file, fh, p);
	std = vfd->current_norm;
	if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		return -EINVAL;
	p->parm.capture.readbuffers = 2;
	if (ops->vidioc_g_std)
		ret = ops->vidioc_g_std(file, fh, &std);
	if (ret == 0)
		v4l2_video_std_frame_period(std,
			    &p->parm.capture.timeperframe);
	return ret;
}
1501
1502static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
1503 struct file *file, void *fh, void *arg)
1504{
1505 struct v4l2_streamparm *p = arg;
1506 int ret = check_fmt(ops, p->type);
1507
1508 return ret ? ret : ops->vidioc_s_parm(file, fh, p);
1509}
1510
1511static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
1512 struct file *file, void *fh, void *arg)
1513{
1514 struct video_device *vfd = video_devdata(file);
1515 struct v4l2_queryctrl *p = arg;
1516 struct v4l2_fh *vfh =
1517 test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
1518
1519 if (vfh && vfh->ctrl_handler)
1520 return v4l2_queryctrl(vfh->ctrl_handler, p);
1521 if (vfd->ctrl_handler)
1522 return v4l2_queryctrl(vfd->ctrl_handler, p);
1523 if (ops->vidioc_queryctrl)
1524 return ops->vidioc_queryctrl(file, fh, p);
1525 return -ENOTTY;
1526}
1527
1528static int v4l_querymenu(const struct v4l2_ioctl_ops *ops,
1529 struct file *file, void *fh, void *arg)
1530{
1531 struct video_device *vfd = video_devdata(file);
1532 struct v4l2_querymenu *p = arg;
1533 struct v4l2_fh *vfh =
1534 test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
1535
1536 if (vfh && vfh->ctrl_handler)
1537 return v4l2_querymenu(vfh->ctrl_handler, p);
1538 if (vfd->ctrl_handler)
1539 return v4l2_querymenu(vfd->ctrl_handler, p);
1540 if (ops->vidioc_querymenu)
1541 return ops->vidioc_querymenu(file, fh, p);
1542 return -ENOTTY;
1543}
1544
/*
 * VIDIOC_G_CTRL handler.  Tries control handlers first (filehandle, then
 * device), then the driver's g_ctrl callback, and finally emulates the
 * ioctl with a single-control VIDIOC_G_EXT_CTRLS call.
 */
static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_control *p = arg;
	struct v4l2_fh *vfh =
		test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
	struct v4l2_ext_controls ctrls;
	struct v4l2_ext_control ctrl;

	if (vfh && vfh->ctrl_handler)
		return v4l2_g_ctrl(vfh->ctrl_handler, p);
	if (vfd->ctrl_handler)
		return v4l2_g_ctrl(vfd->ctrl_handler, p);
	if (ops->vidioc_g_ctrl)
		return ops->vidioc_g_ctrl(file, fh, p);
	if (ops->vidioc_g_ext_ctrls == NULL)
		return -ENOTTY;

	/* Emulate G_CTRL: wrap the single control into an ext-ctrls
	 * request and copy the value back on success. */
	ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id);
	ctrls.count = 1;
	ctrls.controls = &ctrl;
	ctrl.id = p->id;
	ctrl.value = p->value;
	if (check_ext_ctrls(&ctrls, 1)) {
		int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);

		if (ret == 0)
			p->value = ctrl.value;
		return ret;
	}
	return -EINVAL;
}
1578
/*
 * VIDIOC_S_CTRL handler.  Tries control handlers first (filehandle, then
 * device), then the driver's s_ctrl callback, and finally emulates the
 * ioctl with a single-control VIDIOC_S_EXT_CTRLS call.
 */
static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_control *p = arg;
	struct v4l2_fh *vfh =
		test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
	struct v4l2_ext_controls ctrls;
	struct v4l2_ext_control ctrl;

	if (vfh && vfh->ctrl_handler)
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
	if (vfd->ctrl_handler)
		return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
	if (ops->vidioc_s_ctrl)
		return ops->vidioc_s_ctrl(file, fh, p);
	if (ops->vidioc_s_ext_ctrls == NULL)
		return -ENOTTY;

	/* Emulate S_CTRL: wrap the single control into an ext-ctrls
	 * request; the driver never sees the old-style struct. */
	ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id);
	ctrls.count = 1;
	ctrls.controls = &ctrl;
	ctrl.id = p->id;
	ctrl.value = p->value;
	if (check_ext_ctrls(&ctrls, 1))
		return ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
	return -EINVAL;
}
1607
1608static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
1609 struct file *file, void *fh, void *arg)
1610{
1611 struct video_device *vfd = video_devdata(file);
1612 struct v4l2_ext_controls *p = arg;
1613 struct v4l2_fh *vfh =
1614 test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
1615
1616 p->error_idx = p->count;
1617 if (vfh && vfh->ctrl_handler)
1618 return v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
1619 if (vfd->ctrl_handler)
1620 return v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
1621 if (ops->vidioc_g_ext_ctrls == NULL)
1622 return -ENOTTY;
1623 return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
1624 -EINVAL;
1625}
1626
1627static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
1628 struct file *file, void *fh, void *arg)
1629{
1630 struct video_device *vfd = video_devdata(file);
1631 struct v4l2_ext_controls *p = arg;
1632 struct v4l2_fh *vfh =
1633 test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
1634
1635 p->error_idx = p->count;
1636 if (vfh && vfh->ctrl_handler)
1637 return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
1638 if (vfd->ctrl_handler)
1639 return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
1640 if (ops->vidioc_s_ext_ctrls == NULL)
1641 return -ENOTTY;
1642 return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
1643 -EINVAL;
1644}
1645
1646static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
1647 struct file *file, void *fh, void *arg)
1648{
1649 struct video_device *vfd = video_devdata(file);
1650 struct v4l2_ext_controls *p = arg;
1651 struct v4l2_fh *vfh =
1652 test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
1653
1654 p->error_idx = p->count;
1655 if (vfh && vfh->ctrl_handler)
1656 return v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
1657 if (vfd->ctrl_handler)
1658 return v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
1659 if (ops->vidioc_try_ext_ctrls == NULL)
1660 return -ENOTTY;
1661 return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
1662 -EINVAL;
1663}
1664
/*
 * VIDIOC_G_CROP handler.  Calls the driver's g_crop if present, otherwise
 * emulates the old crop API on top of the selection API.
 */
static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct v4l2_crop *p = arg;
	struct v4l2_selection s = {
		.type = p->type,
	};
	int ret;

	if (ops->vidioc_g_crop)
		return ops->vidioc_g_crop(file, fh, p);
	/* simulate capture crop using selection api */

	/* crop means compose for output devices */
	if (V4L2_TYPE_IS_OUTPUT(p->type))
		s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
	else
		s.target = V4L2_SEL_TGT_CROP_ACTIVE;

	/* NOTE(review): assumes vidioc_g_selection is non-NULL whenever this
	 * ioctl is enabled without vidioc_g_crop — confirm the valid_ioctls
	 * setup guarantees this. */
	ret = ops->vidioc_g_selection(file, fh, &s);

	/* copying results to old structure on success */
	if (!ret)
		p->c = s.r;
	return ret;
}
1691
1692static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
1693 struct file *file, void *fh, void *arg)
1694{
1695 struct v4l2_crop *p = arg;
1696 struct v4l2_selection s = {
1697 .type = p->type,
1698 .r = p->c,
1699 };
1700
1701 if (ops->vidioc_s_crop)
1702 return ops->vidioc_s_crop(file, fh, p);
1703 /* simulate capture crop using selection api */
1704
1705 /* crop means compose for output devices */
1706 if (V4L2_TYPE_IS_OUTPUT(p->type))
1707 s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
1708 else
1709 s.target = V4L2_SEL_TGT_CROP_ACTIVE;
1710
1711 return ops->vidioc_s_selection(file, fh, &s);
1712}
1713
/*
 * VIDIOC_CROPCAP handler.  Calls the driver's cropcap if present, otherwise
 * builds the reply from the selection API: bounds and default rectangles
 * come from g_selection and the pixel aspect is reported as square.
 */
static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct v4l2_cropcap *p = arg;
	struct v4l2_selection s = { .type = p->type };
	int ret;

	if (ops->vidioc_cropcap)
		return ops->vidioc_cropcap(file, fh, p);

	/* obtaining bounds */
	if (V4L2_TYPE_IS_OUTPUT(p->type))
		s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
	else
		s.target = V4L2_SEL_TGT_CROP_BOUNDS;

	/* NOTE(review): assumes vidioc_g_selection is non-NULL when
	 * vidioc_cropcap is not — confirm against valid_ioctls setup. */
	ret = ops->vidioc_g_selection(file, fh, &s);
	if (ret)
		return ret;
	p->bounds = s.r;

	/* obtaining defrect */
	if (V4L2_TYPE_IS_OUTPUT(p->type))
		s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
	else
		s.target = V4L2_SEL_TGT_CROP_DEFAULT;

	ret = ops->vidioc_g_selection(file, fh, &s);
	if (ret)
		return ret;
	p->defrect = s.r;

	/* setting trivial pixelaspect */
	p->pixelaspect.numerator = 1;
	p->pixelaspect.denominator = 1;
	return 0;
}
1751
1752static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
1753 struct file *file, void *fh, void *arg)
1754{
1755 struct video_device *vfd = video_devdata(file);
1756 int ret;
1757
1758 if (vfd->v4l2_dev)
1759 pr_info("%s: ================= START STATUS =================\n",
1760 vfd->v4l2_dev->name);
1761 ret = ops->vidioc_log_status(file, fh);
1762 if (vfd->v4l2_dev)
1763 pr_info("%s: ================== END STATUS ==================\n",
1764 vfd->v4l2_dev->name);
1765 return ret;
1766}
1767
1768static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
1769 struct file *file, void *fh, void *arg)
1770{
1771#ifdef CONFIG_VIDEO_ADV_DEBUG
1772 struct v4l2_dbg_register *p = arg;
1773
1774 if (!capable(CAP_SYS_ADMIN))
1775 return -EPERM;
1776 return ops->vidioc_g_register(file, fh, p);
1777#else
1778 return -ENOTTY;
1779#endif
1780}
1781
1782static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
1783 struct file *file, void *fh, void *arg)
1784{
1785#ifdef CONFIG_VIDEO_ADV_DEBUG
1786 struct v4l2_dbg_register *p = arg;
1787
1788 if (!capable(CAP_SYS_ADMIN))
1789 return -EPERM;
1790 return ops->vidioc_s_register(file, fh, p);
1791#else
1792 return -ENOTTY;
1793#endif
1794}
1795
1796static int v4l_dbg_g_chip_ident(const struct v4l2_ioctl_ops *ops,
1797 struct file *file, void *fh, void *arg)
1798{
1799 struct v4l2_dbg_chip_ident *p = arg;
1800
1801 p->ident = V4L2_IDENT_NONE;
1802 p->revision = 0;
1803 return ops->vidioc_g_chip_ident(file, fh, p);
1804}
1805
/* VIDIOC_DQEVENT: dequeue the next pending event for this filehandle;
 * blocks unless the file was opened with O_NONBLOCK. */
static int v4l_dqevent(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK);
}
1811
/* VIDIOC_SUBSCRIBE_EVENT: forward the subscription to the driver. */
static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	return ops->vidioc_subscribe_event(fh, arg);
}
1817
/* VIDIOC_UNSUBSCRIBE_EVENT: forward the unsubscription to the driver. */
static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	return ops->vidioc_unsubscribe_event(fh, arg);
}
1823
/* VIDIOC_G_SLICED_VBI_CAP: report sliced VBI capabilities.  Only the
 * 'type' field is driver input; the rest of the struct is cleared here
 * (the tail past 'type' was already zeroed by the CLEAR flag handling). */
static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct v4l2_sliced_vbi_cap *p = arg;

	/* Clear up to type, everything after type is zeroed already */
	memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));

	return ops->vidioc_g_sliced_vbi_cap(file, fh, p);
}
1834
/*
 * VIDIOC_ENUM_FREQ_BANDS handler.  If the driver does not implement the
 * ioctl, synthesize a single band from the tuner (or modulator) frequency
 * range reported by g_tuner/g_modulator.
 */
static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_frequency_band *p = arg;
	enum v4l2_tuner_type type;
	int err;

	/* Radio device nodes carry radio tuners; anything else is
	 * treated as analog TV. */
	type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;

	if (type != p->type)
		return -EINVAL;
	if (ops->vidioc_enum_freq_bands)
		return ops->vidioc_enum_freq_bands(file, fh, p);
	if (ops->vidioc_g_tuner) {
		struct v4l2_tuner t = {
			.index = p->tuner,
			.type = type,
		};

		err = ops->vidioc_g_tuner(file, fh, &t);
		if (err)
			return err;
		/* One band covering the whole tuner range. */
		p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
		p->rangelow = t.rangelow;
		p->rangehigh = t.rangehigh;
		p->modulation = (type == V4L2_TUNER_RADIO) ?
			V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
		return 0;
	}
	if (ops->vidioc_g_modulator) {
		struct v4l2_modulator m = {
			.index = p->tuner,
		};

		/* Modulators only exist on radio devices. */
		if (type != V4L2_TUNER_RADIO)
			return -EINVAL;
		err = ops->vidioc_g_modulator(file, fh, &m);
		if (err)
			return err;
		/* One band covering the whole modulator range. */
		p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
		p->rangelow = m.rangelow;
		p->rangehigh = m.rangehigh;
		p->modulation = (type == V4L2_TUNER_RADIO) ?
			V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
		return 0;
	}
	return -ENOTTY;
}
1885
/* Per-ioctl dispatch descriptor used by the v4l2_ioctls[] table below. */
struct v4l2_ioctl_info {
	unsigned int ioctl;		/* the VIDIOC_* command code */
	u32 flags;			/* INFO_FL_* flags; bits 16+ hold the CLEAR size */
	const char * const name;	/* symbolic name, for debug output */
	union {
		u32 offset;		/* INFO_FL_STD: offset of the op in v4l2_ioctl_ops */
		int (*func)(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *p);
					/* INFO_FL_FUNC: local handler */
	} u;
	void (*debug)(const void *arg, bool write_only);	/* argument printer */
};
1897
/* This ioctl needs a priority check */
#define INFO_FL_PRIO	(1 << 0)
/* This ioctl can be valid if the filehandle passes a control handler. */
#define INFO_FL_CTRL	(1 << 1)
/* This is a standard ioctl, no need for special code */
#define INFO_FL_STD	(1 << 2)
/* This ioctl has its own function */
#define INFO_FL_FUNC	(1 << 3)
/* Queuing ioctl */
#define INFO_FL_QUEUE	(1 << 4)
/* Zero struct from after the field to the end */
#define INFO_FL_CLEAR(v4l2_struct, field)			\
	((offsetof(struct v4l2_struct, field) +			\
	  sizeof(((struct v4l2_struct *)0)->field)) << 16)
#define INFO_FL_CLEAR_MASK	(_IOC_SIZEMASK << 16)
1913
/* Table entry whose handler is a member of struct v4l2_ioctl_ops. */
#define IOCTL_INFO_STD(_ioctl, _vidioc, _debug, _flags)			\
	[_IOC_NR(_ioctl)] = {						\
		.ioctl = _ioctl,					\
		.flags = _flags | INFO_FL_STD,				\
		.name = #_ioctl,					\
		.u.offset = offsetof(struct v4l2_ioctl_ops, _vidioc),	\
		.debug = _debug,					\
	}

/* Table entry whose handler is one of the local v4l_* helpers. */
#define IOCTL_INFO_FNC(_ioctl, _func, _debug, _flags)			\
	[_IOC_NR(_ioctl)] = {						\
		.ioctl = _ioctl,					\
		.flags = _flags | INFO_FL_FUNC,				\
		.name = #_ioctl,					\
		.u.func = _func,					\
		.debug = _debug,					\
	}
1931
/*
 * Dispatch table for all known V4L2 ioctls, indexed by _IOC_NR() of the
 * command code.  Entries declared with IOCTL_INFO_STD call straight into
 * the driver's v4l2_ioctl_ops; IOCTL_INFO_FNC entries go through one of
 * the v4l_* helpers above.
 */
static struct v4l2_ioctl_info v4l2_ioctls[] = {
	IOCTL_INFO_FNC(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0),
	IOCTL_INFO_FNC(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, INFO_FL_CLEAR(v4l2_fmtdesc, type)),
	IOCTL_INFO_FNC(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, INFO_FL_CLEAR(v4l2_format, type)),
	IOCTL_INFO_FNC(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE),
	IOCTL_INFO_FNC(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)),
	IOCTL_INFO_STD(VIDIOC_G_FBUF, vidioc_g_fbuf, v4l_print_framebuffer, 0),
	IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
	IOCTL_INFO_STD(VIDIOC_OVERLAY, vidioc_overlay, v4l_print_u32, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
	IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
	IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
	IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
	IOCTL_INFO_FNC(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)),
	IOCTL_INFO_FNC(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_G_STD, v4l_g_std, v4l_print_std, 0),
	IOCTL_INFO_FNC(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)),
	IOCTL_INFO_FNC(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)),
	IOCTL_INFO_FNC(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)),
	IOCTL_INFO_FNC(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL),
	IOCTL_INFO_FNC(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)),
	IOCTL_INFO_FNC(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO),
	IOCTL_INFO_STD(VIDIOC_G_AUDIO, vidioc_g_audio, v4l_print_audio, 0),
	IOCTL_INFO_STD(VIDIOC_S_AUDIO, vidioc_s_audio, v4l_print_audio, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)),
	IOCTL_INFO_FNC(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
	IOCTL_INFO_STD(VIDIOC_G_INPUT, vidioc_g_input, v4l_print_u32, 0),
	IOCTL_INFO_FNC(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
	IOCTL_INFO_STD(VIDIOC_G_OUTPUT, vidioc_g_output, v4l_print_u32, 0),
	IOCTL_INFO_FNC(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
	IOCTL_INFO_STD(VIDIOC_G_AUDOUT, vidioc_g_audout, v4l_print_audioout, 0),
	IOCTL_INFO_STD(VIDIOC_S_AUDOUT, vidioc_s_audout, v4l_print_audioout, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)),
	IOCTL_INFO_STD(VIDIOC_S_MODULATOR, vidioc_s_modulator, v4l_print_modulator, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)),
	IOCTL_INFO_FNC(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)),
	IOCTL_INFO_FNC(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)),
	IOCTL_INFO_FNC(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO),
	IOCTL_INFO_STD(VIDIOC_G_SELECTION, vidioc_g_selection, v4l_print_selection, 0),
	IOCTL_INFO_STD(VIDIOC_S_SELECTION, vidioc_s_selection, v4l_print_selection, INFO_FL_PRIO),
	IOCTL_INFO_STD(VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp, v4l_print_jpegcompression, 0),
	IOCTL_INFO_STD(VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0),
	IOCTL_INFO_FNC(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0),
	IOCTL_INFO_STD(VIDIOC_ENUMAUDIO, vidioc_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)),
	IOCTL_INFO_STD(VIDIOC_ENUMAUDOUT, vidioc_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)),
	IOCTL_INFO_FNC(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0),
	IOCTL_INFO_FNC(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
	IOCTL_INFO_FNC(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
	IOCTL_INFO_FNC(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
	IOCTL_INFO_FNC(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
	IOCTL_INFO_FNC(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL),
	IOCTL_INFO_FNC(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
	IOCTL_INFO_STD(VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
	IOCTL_INFO_STD(VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
	IOCTL_INFO_STD(VIDIOC_G_ENC_INDEX, vidioc_g_enc_index, v4l_print_enc_idx, 0),
	IOCTL_INFO_STD(VIDIOC_ENCODER_CMD, vidioc_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
	IOCTL_INFO_STD(VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
	IOCTL_INFO_STD(VIDIOC_DECODER_CMD, vidioc_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO),
	IOCTL_INFO_STD(VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd, v4l_print_decoder_cmd, 0),
	IOCTL_INFO_FNC(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0),
	IOCTL_INFO_FNC(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0),
	IOCTL_INFO_FNC(VIDIOC_DBG_G_CHIP_IDENT, v4l_dbg_g_chip_ident, v4l_print_dbg_chip_ident, 0),
	IOCTL_INFO_FNC(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO),
	IOCTL_INFO_STD(VIDIOC_ENUM_DV_PRESETS, vidioc_enum_dv_presets, v4l_print_dv_enum_presets, 0),
	IOCTL_INFO_STD(VIDIOC_S_DV_PRESET, vidioc_s_dv_preset, v4l_print_dv_preset, INFO_FL_PRIO),
	IOCTL_INFO_STD(VIDIOC_G_DV_PRESET, vidioc_g_dv_preset, v4l_print_dv_preset, 0),
	IOCTL_INFO_STD(VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset, v4l_print_dv_preset, 0),
	IOCTL_INFO_STD(VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO),
	IOCTL_INFO_STD(VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings, v4l_print_dv_timings, 0),
	IOCTL_INFO_FNC(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0),
	IOCTL_INFO_FNC(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0),
	IOCTL_INFO_FNC(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0),
	IOCTL_INFO_FNC(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE),
	IOCTL_INFO_FNC(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE),
	IOCTL_INFO_STD(VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings, v4l_print_enum_dv_timings, 0),
	IOCTL_INFO_STD(VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings, v4l_print_dv_timings, 0),
	IOCTL_INFO_STD(VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)),
	IOCTL_INFO_FNC(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
2017
2018bool v4l2_is_known_ioctl(unsigned int cmd)
2019{
2020 if (_IOC_NR(cmd) >= V4L2_IOCTLS)
2021 return false;
2022 return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
2023}
2024
2025struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, unsigned cmd)
2026{
2027 if (_IOC_NR(cmd) >= V4L2_IOCTLS)
2028 return vdev->lock;
2029 if (test_bit(_IOC_NR(cmd), vdev->disable_locking))
2030 return NULL;
2031 if (vdev->queue && vdev->queue->lock &&
2032 (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE))
2033 return vdev->queue->lock;
2034 return vdev->lock;
2035}
2036
2037/* Common ioctl debug function. This function can be used by
2038 external ioctl messages as well as internal V4L ioctl */
2039void v4l_printk_ioctl(const char *prefix, unsigned int cmd)
2040{
2041 const char *dir, *type;
2042
2043 if (prefix)
2044 printk(KERN_DEBUG "%s: ", prefix);
2045
2046 switch (_IOC_TYPE(cmd)) {
2047 case 'd':
2048 type = "v4l2_int";
2049 break;
2050 case 'V':
2051 if (_IOC_NR(cmd) >= V4L2_IOCTLS) {
2052 type = "v4l2";
2053 break;
2054 }
2055 pr_cont("%s", v4l2_ioctls[_IOC_NR(cmd)].name);
2056 return;
2057 default:
2058 type = "unknown";
2059 break;
2060 }
2061
2062 switch (_IOC_DIR(cmd)) {
2063 case _IOC_NONE: dir = "--"; break;
2064 case _IOC_READ: dir = "r-"; break;
2065 case _IOC_WRITE: dir = "-w"; break;
2066 case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
2067 default: dir = "*ERR*"; break;
2068 }
2069 pr_cont("%s ioctl '%c', dir=%s, #%d (0x%08x)",
2070 type, _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd);
2071}
2072EXPORT_SYMBOL(v4l_printk_ioctl);
2073
/*
 * Core ioctl dispatcher.  Looks @cmd up in v4l2_ioctls[], applies the
 * validity and priority checks, invokes the handler (standard op, local
 * helper, or vidioc_default) and emits optional debug output.
 * @arg points at a kernel-space copy of the ioctl payload.
 */
static long __video_do_ioctl(struct file *file,
		unsigned int cmd, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
	bool write_only = false;
	struct v4l2_ioctl_info default_info;
	const struct v4l2_ioctl_info *info;
	void *fh = file->private_data;
	struct v4l2_fh *vfh = NULL;
	int use_fh_prio = 0;
	int debug = vfd->debug;
	long ret = -ENOTTY;

	if (ops == NULL) {
		pr_warn("%s: has no ioctl_ops.\n",
				video_device_node_name(vfd));
		return ret;
	}

	/* private_data is only a struct v4l2_fh if the driver opted in. */
	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		vfh = file->private_data;
		use_fh_prio = test_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
	}

	if (v4l2_is_known_ioctl(cmd)) {
		info = &v4l2_ioctls[_IOC_NR(cmd)];

		/* The command must either be marked valid for this device or
		 * be a control ioctl backed by a filehandle control handler. */
		if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
		    !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
			goto done;

		/* State-changing ioctls honor filehandle priorities. */
		if (use_fh_prio && (info->flags & INFO_FL_PRIO)) {
			ret = v4l2_prio_check(vfd->prio, vfh->prio);
			if (ret)
				goto done;
		}
	} else {
		/* Unknown command: synthesize a descriptor so the default
		 * handler and debug printing still work. */
		default_info.ioctl = cmd;
		default_info.flags = 0;
		default_info.debug = v4l_print_default;
		info = &default_info;
	}

	/* Write-only payloads must be printed before the handler runs,
	 * since the handler is free to overwrite them. */
	write_only = _IOC_DIR(cmd) == _IOC_WRITE;
	if (write_only && debug > V4L2_DEBUG_IOCTL) {
		v4l_printk_ioctl(video_device_node_name(vfd), cmd);
		pr_cont(": ");
		info->debug(arg, write_only);
	}
	if (info->flags & INFO_FL_STD) {
		/* Standard ioctl: call through the stored ops offset. */
		typedef int (*vidioc_op)(struct file *file, void *fh, void *p);
		const void *p = vfd->ioctl_ops;
		const vidioc_op *vidioc = p + info->u.offset;

		ret = (*vidioc)(file, fh, arg);
	} else if (info->flags & INFO_FL_FUNC) {
		ret = info->u.func(ops, file, fh, arg);
	} else if (!ops->vidioc_default) {
		ret = -ENOTTY;
	} else {
		ret = ops->vidioc_default(file, fh,
			use_fh_prio ? v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
			cmd, arg);
	}

done:
	if (debug) {
		/* Write-only payload was printed above; just log errors. */
		if (write_only && debug > V4L2_DEBUG_IOCTL) {
			if (ret < 0)
				printk(KERN_DEBUG "%s: error %ld\n",
					video_device_node_name(vfd), ret);
			return ret;
		}
		v4l_printk_ioctl(video_device_node_name(vfd), cmd);
		if (ret < 0)
			pr_cont(": error %ld\n", ret);
		else if (debug == V4L2_DEBUG_IOCTL)
			pr_cont("\n");
		else if (_IOC_DIR(cmd) == _IOC_NONE)
			info->debug(arg, write_only);
		else {
			pr_cont(": ");
			info->debug(arg, write_only);
		}
	}

	return ret;
}
2163
/*
 * Some ioctls carry a variable-length array behind a userspace pointer
 * embedded inside the main argument struct (plane arrays for multiplanar
 * buffers, control arrays for the ext-ctrls ioctls).
 *
 * On a match, report the userspace pointer (@user_ptr), the location in
 * the kernel copy where the copied-in array pointer must be patched
 * (@kernel_ptr) and the array size in bytes (@array_size).
 *
 * Returns 1 when @cmd has an array argument, 0 when it does not, and a
 * negative error code when the declared array length is out of range.
 */
static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
			    void * __user *user_ptr, void ***kernel_ptr)
{
	int ret = 0;

	switch (cmd) {
	case VIDIOC_QUERYBUF:
	case VIDIOC_QBUF:
	case VIDIOC_DQBUF: {
		struct v4l2_buffer *buf = parg;

		if (V4L2_TYPE_IS_MULTIPLANAR(buf->type) && buf->length > 0) {
			if (buf->length > VIDEO_MAX_PLANES) {
				ret = -EINVAL;
				break;
			}
			*user_ptr = (void __user *)buf->m.planes;
			*kernel_ptr = (void *)&buf->m.planes;
			*array_size = sizeof(struct v4l2_plane) * buf->length;
			ret = 1;
		}
		break;
	}

	case VIDIOC_S_EXT_CTRLS:
	case VIDIOC_G_EXT_CTRLS:
	case VIDIOC_TRY_EXT_CTRLS: {
		struct v4l2_ext_controls *ctrls = parg;

		if (ctrls->count != 0) {
			if (ctrls->count > V4L2_CID_MAX_CTRLS) {
				ret = -EINVAL;
				break;
			}
			*user_ptr = (void __user *)ctrls->controls;
			*kernel_ptr = (void *)&ctrls->controls;
			*array_size = sizeof(struct v4l2_ext_control)
				    * ctrls->count;
			ret = 1;
		}
		break;
	}
	}

	return ret;
}
2210
/*
 * Copy the ioctl argument from userspace into a kernel buffer, call
 * @func (normally __video_do_ioctl) on the copy, and copy results back.
 * Handles the indirect array payloads discovered by check_array_args().
 */
long
video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
	       v4l2_kioctl func)
{
	char sbuf[128];
	void *mbuf = NULL;
	void *parg = (void *)arg;
	long err = -EINVAL;
	bool has_array_args;
	size_t array_size = 0;
	void __user *user_ptr = NULL;
	void **kernel_ptr = NULL;

	/* Copy arguments into temp kernel buffer */
	if (_IOC_DIR(cmd) != _IOC_NONE) {
		if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
			parg = sbuf;
		} else {
			/* too big to allocate from stack */
			mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
			if (NULL == mbuf)
				return -ENOMEM;
			parg = mbuf;
		}

		err = -EFAULT;
		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			unsigned int n = _IOC_SIZE(cmd);

			/*
			 * In some cases, only a few fields are used as input,
			 * i.e. when the app sets "index" and then the driver
			 * fills in the rest of the structure for the thing
			 * with that index. We only need to copy up the first
			 * non-input field.
			 */
			if (v4l2_is_known_ioctl(cmd)) {
				u32 flags = v4l2_ioctls[_IOC_NR(cmd)].flags;
				if (flags & INFO_FL_CLEAR_MASK)
					n = (flags & INFO_FL_CLEAR_MASK) >> 16;
			}

			if (copy_from_user(parg, (void __user *)arg, n))
				goto out;

			/* zero out anything we don't copy from userspace */
			if (n < _IOC_SIZE(cmd))
				memset((u8 *)parg + n, 0, _IOC_SIZE(cmd) - n);
		} else {
			/* read-only ioctl */
			memset(parg, 0, _IOC_SIZE(cmd));
		}
	}

	err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
	if (err < 0)
		goto out;
	has_array_args = err;

	if (has_array_args) {
		/*
		 * When adding new types of array args, make sure that the
		 * parent argument to ioctl (which contains the pointer to the
		 * array) fits into sbuf (so that mbuf will still remain
		 * unused up to here).
		 */
		mbuf = kmalloc(array_size, GFP_KERNEL);
		err = -ENOMEM;
		if (NULL == mbuf)
			goto out_array_args;
		err = -EFAULT;
		if (copy_from_user(mbuf, user_ptr, array_size))
			goto out_array_args;
		/* Point the kernel copy at the copied-in array. */
		*kernel_ptr = mbuf;
	}

	/* Handles IOCTL */
	err = func(file, cmd, parg);
	if (err == -ENOIOCTLCMD)
		err = -ENOTTY;

	if (has_array_args) {
		/* Restore the user pointer before copying the parent struct
		 * back, then write the (possibly updated) array contents. */
		*kernel_ptr = user_ptr;
		if (copy_to_user(user_ptr, mbuf, array_size))
			err = -EFAULT;
		goto out_array_args;
	}
	/* VIDIOC_QUERY_DV_TIMINGS can return an error, but still have valid
	   results that must be returned. */
	if (err < 0 && cmd != VIDIOC_QUERY_DV_TIMINGS)
		goto out;

out_array_args:
	/* Copy results into user buffer */
	switch (_IOC_DIR(cmd)) {
	case _IOC_READ:
	case (_IOC_WRITE | _IOC_READ):
		if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
			err = -EFAULT;
		break;
	}

out:
	kfree(mbuf);
	return err;
}
EXPORT_SYMBOL(video_usercopy);
2318
/* Generic unlocked_ioctl entry point for V4L2 drivers: marshal the
 * argument between user and kernel space and dispatch via
 * __video_do_ioctl(). */
long video_ioctl2(struct file *file,
	       unsigned int cmd, unsigned long arg)
{
	return video_usercopy(file, cmd, arg, __video_do_ioctl);
}
EXPORT_SYMBOL(video_ioctl2);
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
new file mode 100644
index 000000000000..97b48318aee1
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -0,0 +1,647 @@
1/*
2 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
3 *
4 * Helper functions for devices that use videobuf buffers for both their
5 * source and destination.
6 *
7 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
8 * Pawel Osciak, <pawel@osciak.com>
9 * Marek Szyprowski, <m.szyprowski@samsung.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#include <linux/module.h>
17#include <linux/sched.h>
18#include <linux/slab.h>
19
20#include <media/videobuf2-core.h>
21#include <media/v4l2-mem2mem.h>
22#include <media/v4l2-dev.h>
23#include <media/v4l2-fh.h>
24#include <media/v4l2-event.h>
25
26MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
27MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
28MODULE_LICENSE("GPL");
29
30static bool debug;
31module_param(debug, bool, 0644);
32
33#define dprintk(fmt, arg...) \
34 do { \
35 if (debug) \
36 printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
37 } while (0)
38
39
40/* Instance is already queued on the job_queue */
41#define TRANS_QUEUED (1 << 0)
42/* Instance is currently running in hardware */
43#define TRANS_RUNNING (1 << 1)
44
45
46/* Offset base for buffers on the destination queue - used to distinguish
47 * between source and destination buffers when mmapping - they receive the same
48 * offsets but for different queues */
49#define DST_QUEUE_OFF_BASE (1 << 30)
50
51
52/**
53 * struct v4l2_m2m_dev - per-device context
54 * @curr_ctx: currently running instance
55 * @job_queue: instances queued to run
56 * @job_spinlock: protects job_queue
57 * @m2m_ops: driver callbacks
58 */
59struct v4l2_m2m_dev {
60 struct v4l2_m2m_ctx *curr_ctx;
61
62 struct list_head job_queue;
63 spinlock_t job_spinlock;
64
65 struct v4l2_m2m_ops *m2m_ops;
66};
67
68static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
69 enum v4l2_buf_type type)
70{
71 if (V4L2_TYPE_IS_OUTPUT(type))
72 return &m2m_ctx->out_q_ctx;
73 else
74 return &m2m_ctx->cap_q_ctx;
75}
76
77/**
78 * v4l2_m2m_get_vq() - return vb2_queue for the given type
79 */
80struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
81 enum v4l2_buf_type type)
82{
83 struct v4l2_m2m_queue_ctx *q_ctx;
84
85 q_ctx = get_queue_ctx(m2m_ctx, type);
86 if (!q_ctx)
87 return NULL;
88
89 return &q_ctx->q;
90}
91EXPORT_SYMBOL(v4l2_m2m_get_vq);
92
93/**
94 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
95 */
96void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
97{
98 struct v4l2_m2m_buffer *b = NULL;
99 unsigned long flags;
100
101 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
102
103 if (list_empty(&q_ctx->rdy_queue)) {
104 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
105 return NULL;
106 }
107
108 b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
109 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
110 return &b->vb;
111}
112EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
113
114/**
115 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
116 * return it
117 */
118void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
119{
120 struct v4l2_m2m_buffer *b = NULL;
121 unsigned long flags;
122
123 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
124 if (list_empty(&q_ctx->rdy_queue)) {
125 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
126 return NULL;
127 }
128 b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
129 list_del(&b->list);
130 q_ctx->num_rdy--;
131 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
132
133 return &b->vb;
134}
135EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
136
137/*
138 * Scheduling handlers
139 */
140
141/**
142 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
143 * running instance or NULL if no instance is running
144 */
145void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
146{
147 unsigned long flags;
148 void *ret = NULL;
149
150 spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
151 if (m2m_dev->curr_ctx)
152 ret = m2m_dev->curr_ctx->priv;
153 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
154
155 return ret;
156}
157EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
158
/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 * Does nothing if another job already owns the hardware or the queue is
 * empty.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		/* Only one transaction may use the hardware at a time. */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	/* Claim the head of the job queue as the running context. */
	m2m_dev->curr_ctx = list_entry(m2m_dev->job_queue.next,
				       struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* NOTE(review): curr_ctx is dereferenced after job_spinlock is
	 * dropped; presumably the job cannot be finished or the context
	 * freed before device_run() is invoked -- confirm against callers. */
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
188
/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx: m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	/* Requirement 3: both queues must be streaming.
	 * NOTE(review): read without any lock -- presumably serialized
	 * against streamoff by the caller; confirm. */
	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* At most one pending job per context is allowed on the queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	/* Requirements 1 and 2: at least one ready buffer on each queue.
	 * Both lists are checked under the out-queue's rdy_spinlock. */
	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	/* Optional driver veto via the job_ready callback. */
	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	/* Kick the scheduler: runs the new job if the hardware is idle. */
	v4l2_m2m_try_run(m2m_dev);
}
255
/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	/* Reject calls from contexts that are not the running one. */
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	/* Retire the running job: unlink it, clear its state and wake
	 * anyone waiting for it in v4l2_m2m_ctx_release(). */
	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
294
295/**
296 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
297 */
298int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
299 struct v4l2_requestbuffers *reqbufs)
300{
301 struct vb2_queue *vq;
302
303 vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
304 return vb2_reqbufs(vq, reqbufs);
305}
306EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
307
308/**
309 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
310 *
311 * See v4l2_m2m_mmap() documentation for details.
312 */
313int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
314 struct v4l2_buffer *buf)
315{
316 struct vb2_queue *vq;
317 int ret = 0;
318 unsigned int i;
319
320 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
321 ret = vb2_querybuf(vq, buf);
322
323 /* Adjust MMAP memory offsets for the CAPTURE queue */
324 if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
325 if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
326 for (i = 0; i < buf->length; ++i)
327 buf->m.planes[i].m.mem_offset
328 += DST_QUEUE_OFF_BASE;
329 } else {
330 buf->m.offset += DST_QUEUE_OFF_BASE;
331 }
332 }
333
334 return ret;
335}
336EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
337
338/**
339 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
340 * the type
341 */
342int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
343 struct v4l2_buffer *buf)
344{
345 struct vb2_queue *vq;
346 int ret;
347
348 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
349 ret = vb2_qbuf(vq, buf);
350 if (!ret)
351 v4l2_m2m_try_schedule(m2m_ctx);
352
353 return ret;
354}
355EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
356
357/**
358 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
359 * the type
360 */
361int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
362 struct v4l2_buffer *buf)
363{
364 struct vb2_queue *vq;
365
366 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
367 return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
368}
369EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
370
371/**
372 * v4l2_m2m_streamon() - turn on streaming for a video queue
373 */
374int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
375 enum v4l2_buf_type type)
376{
377 struct vb2_queue *vq;
378 int ret;
379
380 vq = v4l2_m2m_get_vq(m2m_ctx, type);
381 ret = vb2_streamon(vq, type);
382 if (!ret)
383 v4l2_m2m_try_schedule(m2m_ctx);
384
385 return ret;
386}
387EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
388
389/**
390 * v4l2_m2m_streamoff() - turn off streaming for a video queue
391 */
392int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
393 enum v4l2_buf_type type)
394{
395 struct vb2_queue *vq;
396
397 vq = v4l2_m2m_get_vq(m2m_ctx, type);
398 return vb2_streamoff(vq, type);
399}
400EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
401
/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	/* Report V4L2 events (POLLPRI) for devices using v4l2_fh. */
	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		/* Caller asked only for events: skip the buffer queues. */
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	/* Drop the driver's lock (if it provides one) while registering
	 * the wait queues, then reacquire it. */
	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	/* A DONE/ERROR source buffer at the head means a "write" (new
	 * source data) is possible. */
	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	/* A DONE/ERROR destination buffer at the head means a "read"
	 * (processed data) is possible. */
	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
476
477/**
478 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
479 *
480 * Call from driver's mmap() function. Will handle mmap() for both queues
481 * seamlessly for videobuffer, which will receive normal per-queue offsets and
482 * proper videobuf queue pointers. The differentiation is made outside videobuf
483 * by adding a predefined offset to buffers from one of the queues and
484 * subtracting it before passing it back to videobuf. Only drivers (and
485 * thus applications) receive modified offsets.
486 */
487int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
488 struct vm_area_struct *vma)
489{
490 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
491 struct vb2_queue *vq;
492
493 if (offset < DST_QUEUE_OFF_BASE) {
494 vq = v4l2_m2m_get_src_vq(m2m_ctx);
495 } else {
496 vq = v4l2_m2m_get_dst_vq(m2m_ctx);
497 vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
498 }
499
500 return vb2_mmap(vq, vma);
501}
502EXPORT_SYMBOL(v4l2_m2m_mmap);
503
504/**
505 * v4l2_m2m_init() - initialize per-driver m2m data
506 *
507 * Usually called from driver's probe() function.
508 */
509struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
510{
511 struct v4l2_m2m_dev *m2m_dev;
512
513 if (!m2m_ops)
514 return ERR_PTR(-EINVAL);
515
516 BUG_ON(!m2m_ops->device_run);
517 BUG_ON(!m2m_ops->job_abort);
518
519 m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
520 if (!m2m_dev)
521 return ERR_PTR(-ENOMEM);
522
523 m2m_dev->curr_ctx = NULL;
524 m2m_dev->m2m_ops = m2m_ops;
525 INIT_LIST_HEAD(&m2m_dev->job_queue);
526 spin_lock_init(&m2m_dev->job_spinlock);
527
528 return m2m_dev;
529}
530EXPORT_SYMBOL_GPL(v4l2_m2m_init);
531
/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 *
 * NOTE(review): frees the structure without draining job_queue --
 * presumably all contexts were already released via
 * v4l2_m2m_ctx_release(); confirm against callers.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
542
543/**
544 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
545 * @priv - driver's instance private data
546 * @m2m_dev - a previously initialized m2m_dev struct
547 * @vq_init - a callback for queue type-specific initialization function to be
548 * used for initializing videobuf_queues
549 *
550 * Usually called from driver's open() function.
551 */
552struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
553 void *drv_priv,
554 int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
555{
556 struct v4l2_m2m_ctx *m2m_ctx;
557 struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
558 int ret;
559
560 m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
561 if (!m2m_ctx)
562 return ERR_PTR(-ENOMEM);
563
564 m2m_ctx->priv = drv_priv;
565 m2m_ctx->m2m_dev = m2m_dev;
566 init_waitqueue_head(&m2m_ctx->finished);
567
568 out_q_ctx = &m2m_ctx->out_q_ctx;
569 cap_q_ctx = &m2m_ctx->cap_q_ctx;
570
571 INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
572 INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
573 spin_lock_init(&out_q_ctx->rdy_spinlock);
574 spin_lock_init(&cap_q_ctx->rdy_spinlock);
575
576 INIT_LIST_HEAD(&m2m_ctx->queue);
577
578 ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
579
580 if (ret)
581 goto err;
582
583 return m2m_ctx;
584err:
585 kfree(m2m_ctx);
586 return ERR_PTR(ret);
587}
588EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
589
/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 *
 * If a job for this context is currently running in hardware, the driver
 * is asked to abort it and we wait until v4l2_m2m_job_finish() signals
 * completion before tearing the queues down.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
		/* v4l2_m2m_job_finish() clears TRANS_RUNNING and wakes us.
		 * NOTE(review): job_flags is re-read here without the
		 * spinlock -- presumably wait_event()'s barriers make this
		 * safe; confirm. */
		wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		/* Queued but not started: just unlink from the job queue. */
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
625
626/**
627 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
628 *
629 * Call from buf_queue(), videobuf_queue_ops callback.
630 */
631void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
632{
633 struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
634 struct v4l2_m2m_queue_ctx *q_ctx;
635 unsigned long flags;
636
637 q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
638 if (!q_ctx)
639 return;
640
641 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
642 list_add_tail(&b->list, &q_ctx->rdy_queue);
643 q_ctx->num_rdy++;
644 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
645}
646EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
647
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
new file mode 100644
index 000000000000..9182f81deb5b
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -0,0 +1,470 @@
1/*
2 * V4L2 sub-device
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
7 * Sakari Ailus <sakari.ailus@iki.fi>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/ioctl.h>
24#include <linux/slab.h>
25#include <linux/types.h>
26#include <linux/videodev2.h>
27#include <linux/export.h>
28
29#include <media/v4l2-ctrls.h>
30#include <media/v4l2-device.h>
31#include <media/v4l2-ioctl.h>
32#include <media/v4l2-fh.h>
33#include <media/v4l2-event.h>
34
/*
 * subdev_fh_init - allocate per-file-handle state for a subdev node.
 *
 * With the subdev userspace API enabled, each open file handle gets its
 * own zero-initialized array of per-pad "try" data, one element per pad
 * of the subdev's media entity.  Returns 0 on success, -ENOMEM on
 * allocation failure.  Freed by subdev_fh_free().
 */
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	/* kcalloc() zero-initializes like kzalloc() but also guards the
	 * num_pads * element-size multiplication against overflow. */
	fh->pad = kcalloc(sd->entity.num_pads, sizeof(*fh->pad), GFP_KERNEL);
	if (fh->pad == NULL)
		return -ENOMEM;
#endif
	return 0;
}
44
/*
 * subdev_fh_free - release the per-pad state allocated by subdev_fh_init().
 * Safe if fh->pad is NULL (kfree(NULL) is a no-op); resetting the pointer
 * guards against a double free.
 */
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	kfree(fh->pad);
	fh->pad = NULL;
#endif
}
52
/*
 * subdev_open - open() handler for a v4l2 subdev device node.
 *
 * Allocates a v4l2_subdev_fh, initializes its per-pad state and v4l2_fh,
 * takes a media-entity reference when a media controller is present, and
 * finally invokes the subdev's internal open op.  On any failure all the
 * steps taken so far are undone in reverse order at the err label.
 */
static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
#if defined(CONFIG_MEDIA_CONTROLLER)
	struct media_entity *entity = NULL;
#endif
	int ret;

	subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh);
	file->private_data = &subdev_fh->vfh;
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->v4l2_dev->mdev) {
		/* Pin the entity while the device node stays open. */
		entity = media_entity_get(&sd->entity);
		if (!entity) {
			ret = -EBUSY;
			goto err;
		}
	}
#endif

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (entity)
		media_entity_put(entity);
#endif
	v4l2_fh_del(&subdev_fh->vfh);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}
106
/*
 * subdev_close - release() handler; undoes everything subdev_open() did:
 * internal close op, media-entity reference, v4l2_fh registration and the
 * per-handle allocations.
 */
static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->v4l2_dev->mdev)
		media_entity_put(&sd->entity);
#endif
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);
	file->private_data = NULL;

	return 0;
}
128
/*
 * subdev_do_ioctl - dispatch a single subdev ioctl.
 *
 * @arg already lives in kernel space (video_usercopy() handled the copy).
 * Control and event ioctls are serviced by the v4l2-ctrls / v4l2-event
 * frameworks; pad-level format/crop/selection ioctls (behind
 * CONFIG_VIDEO_V4L2_SUBDEV_API) validate 'which' and the pad index here
 * and then forward to the subdev's pad ops.  Unknown commands fall
 * through to the subdev's core ioctl op.
 */
static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
#endif

	switch (cmd) {
	/* Control ioctls are handled by the control framework using the
	 * file handle's control handler. */
	case VIDIOC_QUERYCTRL:
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);

	case VIDIOC_S_EXT_CTRLS:
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);

	case VIDIOC_DQEVENT:
		/* Events only make sense if the subdev advertises them. */
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);

	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/* Raw register access is a debug feature restricted to admin. */
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: ================= START STATUS =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ================== END STATUS ==================\n",
			sd->name);
		return ret;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY &&
		    format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (format->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY &&
		    format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (format->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;
		int rval;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
		    crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (crop->pad >= sd->entity.num_pads)
			return -EINVAL;

		/* Prefer the legacy get_crop op; fall back to emulating it
		 * through the selection API if it is not implemented. */
		rval = v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop);
		if (rval != -ENOIOCTLCMD)
			return rval;

		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, subdev_fh, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;
		int rval;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
		    crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (crop->pad >= sd->entity.num_pads)
			return -EINVAL;

		/* Same fallback as G_CROP: emulate via set_selection. */
		rval = v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop);
		if (rval != -ENOIOCTLCMD)
			return rval;

		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, subdev_fh, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		if (code->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		if (fse->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
		return v4l2_subdev_call(sd, video, g_frame_interval, arg);

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL:
		return v4l2_subdev_call(sd, video, s_frame_interval, arg);

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		if (fie->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
		    sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (sel->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(
			sd, pad, get_selection, subdev_fh, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
		    sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (sel->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(
			sd, pad, set_selection, subdev_fh, sel);
	}
#endif
	default:
		/* Give the subdev a chance to handle private ioctls. */
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}
358
/*
 * subdev_ioctl - unlocked_ioctl handler: lets video_usercopy() move the
 * argument between user and kernel space, then dispatches to
 * subdev_do_ioctl().
 */
static long subdev_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl);
}
364
365static unsigned int subdev_poll(struct file *file, poll_table *wait)
366{
367 struct video_device *vdev = video_devdata(file);
368 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
369 struct v4l2_fh *fh = file->private_data;
370
371 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
372 return POLLERR;
373
374 poll_wait(file, &fh->wait, wait);
375
376 if (v4l2_event_pending(fh))
377 return POLLPRI;
378
379 return 0;
380}
381
/* File operations shared by all subdev device nodes; ioctls go through
 * video_usercopy() into subdev_do_ioctl(). */
const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
	.release = subdev_close,
	.poll = subdev_poll,
};
389
390#ifdef CONFIG_MEDIA_CONTROLLER
391int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
392 struct media_link *link,
393 struct v4l2_subdev_format *source_fmt,
394 struct v4l2_subdev_format *sink_fmt)
395{
396 if (source_fmt->format.width != sink_fmt->format.width
397 || source_fmt->format.height != sink_fmt->format.height
398 || source_fmt->format.code != sink_fmt->format.code)
399 return -EINVAL;
400
401 return 0;
402}
403EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
404
/*
 * Fetch the active format on @pad into @fmt for link validation.
 *
 * Only sub-device entities can report a format; V4L device nodes (and,
 * after a WARN, any unexpected entity type) return -EINVAL so the caller
 * skips validation for that link end.  Note the unusual switch layout:
 * the default case deliberately falls through into the DEVNODE_V4L case.
 */
static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad,
				     struct v4l2_subdev_format *fmt)
{
	switch (media_entity_type(pad->entity)) {
	case MEDIA_ENT_T_V4L2_SUBDEV:
		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = pad->index;
		return v4l2_subdev_call(media_entity_to_v4l2_subdev(
						pad->entity),
					pad, get_fmt, NULL, fmt);
	default:
		WARN(1, "Driver bug! Wrong media entity type %d, entity %s\n",
		     media_entity_type(pad->entity), pad->entity->name);
		/* Fall through */
	case MEDIA_ENT_T_DEVNODE_V4L:
		return -EINVAL;
	}
}
424
/*
 * v4l2_subdev_link_validate - validate a media link between two pads.
 *
 * Reads the active format at both ends of the link.  If either end cannot
 * report a format, the link is deliberately treated as valid (return 0),
 * since there is nothing to compare.  The sink subdev's own link_validate
 * pad op is tried first; if it is not implemented (-ENOIOCTLCMD) the
 * default width/height/code comparison is applied.
 */
int v4l2_subdev_link_validate(struct media_link *link)
{
	struct v4l2_subdev *sink;
	struct v4l2_subdev_format sink_fmt, source_fmt;
	int rval;

	rval = v4l2_subdev_link_validate_get_format(
		link->source, &source_fmt);
	if (rval < 0)
		return 0;

	rval = v4l2_subdev_link_validate_get_format(
		link->sink, &sink_fmt);
	if (rval < 0)
		return 0;

	sink = media_entity_to_v4l2_subdev(link->sink->entity);

	rval = v4l2_subdev_call(sink, pad, link_validate, link,
				&source_fmt, &sink_fmt);
	if (rval != -ENOIOCTLCMD)
		return rval;

	return v4l2_subdev_link_validate_default(
		sink, link, &source_fmt, &sink_fmt);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
452#endif /* CONFIG_MEDIA_CONTROLLER */
453
454void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
455{
456 INIT_LIST_HEAD(&sd->list);
457 BUG_ON(!ops);
458 sd->ops = ops;
459 sd->v4l2_dev = NULL;
460 sd->flags = 0;
461 sd->name[0] = '\0';
462 sd->grp_id = 0;
463 sd->dev_priv = NULL;
464 sd->host_priv = NULL;
465#if defined(CONFIG_MEDIA_CONTROLLER)
466 sd->entity.name = sd->name;
467 sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
468#endif
469}
470EXPORT_SYMBOL(v4l2_subdev_init);
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
new file mode 100644
index 000000000000..bf7a326b1cdc
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -0,0 +1,1189 @@
1/*
2 * generic helper functions for handling video4linux capture buffers
3 *
4 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
5 *
6 * Highly based on video-buf written originally by:
7 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
8 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
9 * (c) 2006 Ted Walther and John Sokol
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <linux/mm.h>
20#include <linux/sched.h>
21#include <linux/slab.h>
22#include <linux/interrupt.h>
23
24#include <media/videobuf-core.h>
25
/* Magic cookie stored in every videobuf_buffer; MAGIC_CHECK BUG()s on
 * mismatch to catch stale or corrupted buffer pointers early. */
#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			printk(KERN_ERR					\
				"magic mismatch: %x (expected %x)\n",	\
				is, should);				\
			BUG();						\
		}							\
	} while (0)

/* Debug verbosity; runtime-tunable via the "debug" module parameter. */
static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vbuf: " fmt, ## arg);	\
	} while (0)

/* --------------------------------------------------------------------- */

/* Invoke an optional memory-backend op; yields 0 when not provided. */
#define CALL(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)
54
55struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
56{
57 struct videobuf_buffer *vb;
58
59 BUG_ON(q->msize < sizeof(*vb));
60
61 if (!q->int_ops || !q->int_ops->alloc_vb) {
62 printk(KERN_ERR "No specific ops defined!\n");
63 BUG();
64 }
65
66 vb = q->int_ops->alloc_vb(q->msize);
67 if (NULL != vb) {
68 init_waitqueue_head(&vb->done);
69 vb->magic = MAGIC_BUFFER;
70 }
71
72 return vb;
73}
74EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
75
76static int is_state_active_or_queued(struct videobuf_queue *q, struct videobuf_buffer *vb)
77{
78 unsigned long flags;
79 bool rc;
80
81 spin_lock_irqsave(q->irqlock, flags);
82 rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
83 spin_unlock_irqrestore(q->irqlock, flags);
84 return rc;
85};
86
/*
 * videobuf_waiton - wait until @vb is no longer under driver control
 *
 * @non_blocking: return -EAGAIN instead of sleeping when the buffer is
 *		  still ACTIVE or QUEUED.
 * @intr: sleep interruptibly; may return -ERESTARTSYS.
 *
 * NOTE(review): mutex_is_locked(q->ext_lock) only proves that *someone*
 * holds ext_lock; this assumes it is the current task — confirm.
 */
int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
		int non_blocking, int intr)
{
	bool is_ext_locked;
	int ret = 0;

	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

	if (non_blocking) {
		/* Poll once; -EAGAIN when the buffer is still in flight. */
		if (is_state_active_or_queued(q, vb))
			return 0;
		return -EAGAIN;
	}

	is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);

	/* Release vdev lock to prevent this wait from blocking outside access to
	   the device. */
	if (is_ext_locked)
		mutex_unlock(q->ext_lock);
	if (intr)
		ret = wait_event_interruptible(vb->done, is_state_active_or_queued(q, vb));
	else
		wait_event(vb->done, is_state_active_or_queued(q, vb));
	/* Relock */
	if (is_ext_locked)
		mutex_lock(q->ext_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);
118
/*
 * videobuf_iolock - pin/prepare the memory behind @vb for I/O
 *
 * Delegates to the memory backend's .iolock op.  @fbuf is passed
 * through to the backend (presumably only meaningful for overlay
 * buffers — verify against the backend in use).
 */
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
		struct v4l2_framebuffer *fbuf)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);
128
129void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
130 struct videobuf_buffer *buf)
131{
132 if (q->int_ops->vaddr)
133 return q->int_ops->vaddr(buf);
134 return NULL;
135}
136EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
137
138/* --------------------------------------------------------------------- */
139
140
141void videobuf_queue_core_init(struct videobuf_queue *q,
142 const struct videobuf_queue_ops *ops,
143 struct device *dev,
144 spinlock_t *irqlock,
145 enum v4l2_buf_type type,
146 enum v4l2_field field,
147 unsigned int msize,
148 void *priv,
149 struct videobuf_qtype_ops *int_ops,
150 struct mutex *ext_lock)
151{
152 BUG_ON(!q);
153 memset(q, 0, sizeof(*q));
154 q->irqlock = irqlock;
155 q->ext_lock = ext_lock;
156 q->dev = dev;
157 q->type = type;
158 q->field = field;
159 q->msize = msize;
160 q->ops = ops;
161 q->priv_data = priv;
162 q->int_ops = int_ops;
163
164 /* All buffer operations are mandatory */
165 BUG_ON(!q->ops->buf_setup);
166 BUG_ON(!q->ops->buf_prepare);
167 BUG_ON(!q->ops->buf_queue);
168 BUG_ON(!q->ops->buf_release);
169
170 /* Lock is mandatory for queue_cancel to work */
171 BUG_ON(!irqlock);
172
173 /* Having implementations for abstract methods are mandatory */
174 BUG_ON(!q->int_ops);
175
176 mutex_init(&q->vb_lock);
177 init_waitqueue_head(&q->wait);
178 INIT_LIST_HEAD(&q->stream);
179}
180EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
181
182/* Locking: Only usage in bttv unsafe find way to remove */
183int videobuf_queue_is_busy(struct videobuf_queue *q)
184{
185 int i;
186
187 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
188
189 if (q->streaming) {
190 dprintk(1, "busy: streaming active\n");
191 return 1;
192 }
193 if (q->reading) {
194 dprintk(1, "busy: pending read #1\n");
195 return 1;
196 }
197 if (q->read_buf) {
198 dprintk(1, "busy: pending read #2\n");
199 return 1;
200 }
201 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
202 if (NULL == q->bufs[i])
203 continue;
204 if (q->bufs[i]->map) {
205 dprintk(1, "busy: buffer #%d mapped\n", i);
206 return 1;
207 }
208 if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
209 dprintk(1, "busy: buffer #%d queued\n", i);
210 return 1;
211 }
212 if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
213 dprintk(1, "busy: buffer #%d avtive\n", i);
214 return 1;
215 }
216 }
217 return 0;
218}
219EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
220
/**
 * __videobuf_free() - free all the buffers and their control structures
 *
 * This function can only be called if streaming/reading is off, i.e. no buffers
 * are under control of the driver.
 *
 * Returns -EBUSY while streaming/reading is active or while any buffer
 * is still mmapped to userspace; 0 on success (also when @q is NULL).
 *
 * Locking: Caller holds q->vb_lock.
 */
static int __videobuf_free(struct videobuf_queue *q)
{
	int i;

	dprintk(1, "%s\n", __func__);
	if (!q)
		return 0;

	if (q->streaming || q->reading) {
		dprintk(1, "Cannot free buffers when streaming or reading\n");
		return -EBUSY;
	}

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* Refuse while any buffer is still mapped into userspace. */
	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map) {
			dprintk(1, "Cannot free mmapped buffers\n");
			return -EBUSY;
		}

	/* Release driver-side resources, then the control structures. */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}
259
/*
 * videobuf_queue_cancel - stop streaming/reading and flush the queue
 *
 * Queued buffers are unlinked from the driver's list under irqlock and
 * marked VIDEOBUF_ERROR (waking any waiters); afterwards every buffer
 * is handed back to the driver via buf_release and the stream list is
 * reinitialized.  The control structures themselves are NOT freed.
 *
 * Locking: Caller holds q->vb_lock.
 */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading  = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
292
293/* --------------------------------------------------------------------- */
294
295/* Locking: Caller holds q->vb_lock */
296enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
297{
298 enum v4l2_field field = q->field;
299
300 BUG_ON(V4L2_FIELD_ANY == field);
301
302 if (V4L2_FIELD_ALTERNATE == field) {
303 if (V4L2_FIELD_TOP == q->last) {
304 field = V4L2_FIELD_BOTTOM;
305 q->last = V4L2_FIELD_BOTTOM;
306 } else {
307 field = V4L2_FIELD_TOP;
308 q->last = V4L2_FIELD_TOP;
309 }
310 }
311 return field;
312}
313EXPORT_SYMBOL_GPL(videobuf_next_field);
314
/*
 * videobuf_status - fill a v4l2_buffer from driver-side buffer state
 *
 * Translates @vb's bookkeeping into the userspace-visible @b for
 * QUERYBUF/DQBUF reporting.
 *
 * Locking: Caller holds q->vb_lock.
 */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
				struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	b->index    = vb->i;
	b->type     = type;

	/* Where userspace finds the data depends on the memory type. */
	b->memory   = vb->memory;
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		b->m.offset  = vb->boff;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_USERPTR:
		b->m.userptr = vb->baddr;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_OVERLAY:
		b->m.offset  = vb->boff;
		break;
	}

	b->flags    = 0;
	if (vb->map)
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	/* Map internal buffer states onto the V4L2 flag bits. */
	switch (vb->state) {
	case VIDEOBUF_PREPARED:
	case VIDEOBUF_QUEUED:
	case VIDEOBUF_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VIDEOBUF_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VIDEOBUF_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VIDEOBUF_NEEDS_INIT:
	case VIDEOBUF_IDLE:
		/* nothing */
		break;
	}

	b->field     = vb->field;
	b->timestamp = vb->ts;
	b->bytesused = vb->size;
	/* Two fields per frame. */
	b->sequence  = vb->field_count >> 1;
}
367
/*
 * videobuf_mmap_free - free all buffers, taking q->vb_lock
 *
 * Locked wrapper around __videobuf_free(); returns its result
 * (-EBUSY while buffers are in use, 0 on success).
 */
int videobuf_mmap_free(struct videobuf_queue *q)
{
	int ret;
	videobuf_queue_lock(q);
	ret = __videobuf_free(q);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);
377
/*
 * __videobuf_mmap_setup - (re)allocate the queue's buffer array
 *
 * Frees any existing buffers, then allocates up to @bcount buffers of
 * @bsize bytes for the given memory type.  Returns the number actually
 * allocated (may be fewer than requested), -ENOMEM when none could be
 * allocated, or the error from __videobuf_free().
 *
 * Locking: Caller holds q->vb_lock.
 */
int __videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	unsigned int i;
	int err;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	err = __videobuf_free(q);
	if (0 != err)
		return err;

	/* Allocate and initialize buffers */
	for (i = 0; i < bcount; i++) {
		q->bufs[i] = videobuf_alloc_vb(q);

		if (NULL == q->bufs[i])
			break;

		q->bufs[i]->i = i;
		q->bufs[i]->memory = memory;
		q->bufs[i]->bsize = bsize;
		switch (memory) {
		case V4L2_MEMORY_MMAP:
			/* Page-aligned fake offset: the mmap cookie that
			 * identifies this buffer to videobuf_mmap_mapper(). */
			q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
			break;
		case V4L2_MEMORY_USERPTR:
		case V4L2_MEMORY_OVERLAY:
			/* nothing */
			break;
		}
	}

	if (!i)
		return -ENOMEM;

	dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

	return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
421
/*
 * videobuf_mmap_setup - locked wrapper around __videobuf_mmap_setup()
 *
 * Takes q->vb_lock and forwards; see __videobuf_mmap_setup() for the
 * return value semantics.
 */
int videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	int ret;
	videobuf_queue_lock(q);
	ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
433
/*
 * videobuf_reqbufs - VIDIOC_REQBUFS handler
 *
 * Validates the request, asks the driver (buf_setup) for the final
 * buffer count and size, allocates the buffers and reports the actual
 * count back in req->count.  Fails with -EBUSY while streaming or
 * while buffers are already queued on the stream list.
 */
int videobuf_reqbufs(struct videobuf_queue *q,
		 struct v4l2_requestbuffers *req)
{
	unsigned int size, count;
	int retval;

	if (req->count < 1) {
		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
		return -EINVAL;
	}

	if (req->memory != V4L2_MEMORY_MMAP &&
	    req->memory != V4L2_MEMORY_USERPTR &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1, "reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	if (req->type != q->type) {
		dprintk(1, "reqbufs: queue type invalid\n");
		retval = -EINVAL;
		goto done;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming already exists\n");
		retval = -EBUSY;
		goto done;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1, "reqbufs: stream running\n");
		retval = -EBUSY;
		goto done;
	}

	/* Clamp the request, then let the driver pick count and size. */
	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	q->ops->buf_setup(q, &count, &size);
	dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
		count, size,
		(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

	retval = __videobuf_mmap_setup(q, count, size, req->memory);
	if (retval < 0) {
		dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
		goto done;
	}

	/* Report how many buffers were actually allocated. */
	req->count = retval;
	retval = 0;

 done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
493
/*
 * videobuf_querybuf - VIDIOC_QUERYBUF handler
 *
 * Validates b->type/b->index and fills @b with the current state of
 * the selected buffer via videobuf_status().
 */
int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	int ret = -EINVAL;

	videobuf_queue_lock(q);
	if (unlikely(b->type != q->type)) {
		dprintk(1, "querybuf: Wrong type.\n");
		goto done;
	}
	if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
		dprintk(1, "querybuf: index out of range.\n");
		goto done;
	}
	if (unlikely(NULL == q->bufs[b->index])) {
		dprintk(1, "querybuf: buffer is null.\n");
		goto done;
	}

	videobuf_status(q, b, q->bufs[b->index], q->type);

	ret = 0;
done:
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);
520
/*
 * videobuf_qbuf - VIDIOC_QBUF handler
 *
 * Validates @b against the queue, records the userspace parameters for
 * the buffer's memory type, prepares the buffer (buf_prepare) and
 * appends it to the stream list; if streaming is already on, the
 * buffer is also handed straight to the driver's buf_queue.
 *
 * NOTE(review): mmap_sem is taken before q->vb_lock for MMAP buffers —
 * presumably to keep a fixed lock order against the mmap() path;
 * confirm before changing.
 */
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	/* Record the per-memory-type parameters supplied by userspace. */
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested "
				   "but buffer addr is zero!\n");
			goto done;
		}
		/* For output streams userspace supplies the payload info. */
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
			buf->size = b->bytesused;
			buf->field = b->field;
			buf->ts = b->timestamp;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		/* Userspace moved the buffer: drop the old mapping first. */
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	/* Already streaming: hand the buffer to the driver right away. */
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	videobuf_queue_unlock(q);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);
623
/*
 * stream_next_buffer_check_queue - wait for a dequeueable buffer
 *
 * Returns 0 once the stream list is non-empty (or immediately if it
 * already is), -EINVAL when streaming is off, -EAGAIN in non-blocking
 * mode with nothing to dequeue, or the wait_event_interruptible()
 * error.  While sleeping, q->vb_lock is dropped so qbuf can make
 * progress; the conditions are re-validated under the lock afterwards
 * (the "goto checks" loop).
 *
 * Locking: Caller holds q->vb_lock.
 */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
	int retval;

checks:
	if (!q->streaming) {
		dprintk(1, "next_buffer: Not streaming\n");
		retval = -EINVAL;
		goto done;
	}

	if (list_empty(&q->stream)) {
		if (noblock) {
			retval = -EAGAIN;
			dprintk(2, "next_buffer: no buffers to dequeue\n");
			goto done;
		} else {
			dprintk(2, "next_buffer: waiting on buffer\n");

			/* Drop lock to avoid deadlock with qbuf */
			videobuf_queue_unlock(q);

			/* Checking list_empty and streaming is safe without
			 * locks because we goto checks to validate while
			 * holding locks before proceeding */
			retval = wait_event_interruptible(q->wait,
				!list_empty(&q->stream) || !q->streaming);
			videobuf_queue_lock(q);

			if (retval)
				goto done;

			goto checks;
		}
	}

	retval = 0;

done:
	return retval;
}
666
/*
 * stream_next_buffer - fetch the next finished buffer from the stream
 *
 * Waits for the head of the stream list to leave driver control and
 * returns it through @vb.  On any error *vb is left untouched.
 *
 * Locking: Caller holds q->vb_lock.
 */
static int stream_next_buffer(struct videobuf_queue *q,
		struct videobuf_buffer **vb, int nonblocking)
{
	int retval;
	struct videobuf_buffer *buf = NULL;

	retval = stream_next_buffer_check_queue(q, nonblocking);
	if (retval)
		goto done;

	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
	retval = videobuf_waiton(q, buf, nonblocking, 1);
	if (retval < 0)
		goto done;

	*vb = buf;
done:
	return retval;
}
687
/*
 * videobuf_dqbuf - VIDIOC_DQBUF handler
 *
 * Waits for the next finished buffer (honoring @nonblocking), syncs it
 * via the memory backend, reports its state through @b (which is
 * zeroed first), removes it from the stream list and returns it to
 * VIDEOBUF_IDLE so userspace can requeue it.
 */
int videobuf_dqbuf(struct videobuf_queue *q,
	       struct v4l2_buffer *b, int nonblocking)
{
	struct videobuf_buffer *buf = NULL;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(b, 0, sizeof(*b));
	videobuf_queue_lock(q);

	retval = stream_next_buffer(q, &buf, nonblocking);
	if (retval < 0) {
		dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
		goto done;
	}

	/* Only DONE and ERROR buffers may be dequeued. */
	switch (buf->state) {
	case VIDEOBUF_ERROR:
		dprintk(1, "dqbuf: state is error\n");
		break;
	case VIDEOBUF_DONE:
		dprintk(1, "dqbuf: state is done\n");
		break;
	default:
		dprintk(1, "dqbuf: state invalid\n");
		retval = -EINVAL;
		goto done;
	}
	CALL(q, sync, q, buf);
	videobuf_status(q, b, buf, q->type);
	list_del(&buf->stream);
	buf->state = VIDEOBUF_IDLE;
	b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
727
/*
 * videobuf_streamon - VIDIOC_STREAMON handler
 *
 * Marks the queue streaming and hands every already-prepared buffer on
 * the stream list to the driver.  Fails with -EBUSY while read() I/O
 * is in progress; calling it while already streaming is a no-op
 * returning 0.
 */
int videobuf_streamon(struct videobuf_queue *q)
{
	struct videobuf_buffer *buf;
	unsigned long flags = 0;
	int retval;

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading)
		goto done;
	retval = 0;
	if (q->streaming)
		goto done;
	q->streaming = 1;
	/* Queue everything userspace prepared before STREAMON. */
	spin_lock_irqsave(q->irqlock, flags);
	list_for_each_entry(buf, &q->stream, stream)
		if (buf->state == VIDEOBUF_PREPARED)
			q->ops->buf_queue(q, buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	wake_up_interruptible_sync(&q->wait);
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);
754
/*
 * __videobuf_streamoff - stop streaming; -EINVAL when not streaming
 *
 * Locking: Caller holds q->vb_lock.
 */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
	if (!q->streaming)
		return -EINVAL;

	videobuf_queue_cancel(q);

	return 0;
}
765
/*
 * videobuf_streamoff - VIDIOC_STREAMOFF handler (locked wrapper)
 */
int videobuf_streamoff(struct videobuf_queue *q)
{
	int retval;

	videobuf_queue_lock(q);
	retval = __videobuf_streamoff(q);
	videobuf_queue_unlock(q);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);
777
/*
 * videobuf_read_zerocopy - capture one frame directly into user memory
 *
 * Builds a temporary USERPTR buffer around @data, queues it, waits
 * (uninterruptibly) for completion and returns the number of captured
 * bytes, -EIO on a capture error, or a prepare/alloc error.  The
 * temporary buffer is always released before returning.
 *
 * Locking: Caller holds q->vb_lock.
 */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
				  char __user *data,
				  size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* setup stuff */
	q->read_buf = videobuf_alloc_vb(q);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr  = (unsigned long)data;
	q->read_buf->bsize  = count;

	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, q->read_buf, field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	spin_lock_irqsave(q->irqlock, flags);
	q->ops->buf_queue(q, q->read_buf);
	spin_unlock_irqrestore(q->irqlock, flags);
	retval = videobuf_waiton(q, q->read_buf, 0, 0);
	if (0 == retval) {
		CALL(q, sync, q, q->read_buf);
		if (VIDEOBUF_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

done:
	/* cleanup */
	q->ops->buf_release(q, q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}
823
824static int __videobuf_copy_to_user(struct videobuf_queue *q,
825 struct videobuf_buffer *buf,
826 char __user *data, size_t count,
827 int nonblocking)
828{
829 void *vaddr = CALL(q, vaddr, buf);
830
831 /* copy to userspace */
832 if (count > buf->size - q->read_off)
833 count = buf->size - q->read_off;
834
835 if (copy_to_user(data, vaddr + q->read_off, count))
836 return -EFAULT;
837
838 return count;
839}
840
/*
 * Copy captured data to userspace for the read() stream path.
 *
 * @vbihack: patch the frame counter into the last 4 bytes of the
 * buffer before copying (legacy VBI userspace protocol, see below).
 *
 * NOTE(review): @count is size_t, so "count == -EFAULT" relies on the
 * negative int being converted to the matching huge unsigned value —
 * it works, but only reports the fault when nothing was copied yet
 * (pos == 0); otherwise the short count is returned.
 */
static int __videobuf_copy_stream(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc = CALL(q, vaddr, buf);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
			* within the last four bytes of each vbi data block.
			* We need that one to maintain backward compatibility
			* to all vbi decoding software out there ... */
		fc += (buf->size >> 2) - 1;
		*fc = buf->field_count >> 1;
		dprintk(1, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

	if ((count == -EFAULT) && (0 == pos))
		return -EFAULT;

	return count;
}
866
/*
 * videobuf_read_one - read() implementation capturing one frame at a time
 *
 * Tries a zero-copy capture straight into user memory when the request
 * is large enough and blocking is allowed; otherwise captures into a
 * kernel-side USERPTR bounce buffer (q->read_buf) and copies out in
 * pieces, tracking progress in q->read_off.  Returns bytes copied or a
 * negative error (-EIO on capture error, -ENOMEM, -EAGAIN, ...).
 */
ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	videobuf_queue_lock(q);

	/* Ask the driver how big one frame is. */
	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf  &&
	    count >= size        &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0 || retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc_vb(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	/* Keep a partially-read frame around for the next read() call. */
	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
952
/*
 * __videobuf_read_start - set up buffers for streaming read() I/O
 *
 * Allocates between 2 and VIDEO_MAX_FRAME kernel-side USERPTR buffers
 * (count/size negotiated with the driver via buf_setup), prepares and
 * queues all of them, and marks the queue as reading.  Returns 0 or a
 * negative error from setup/prepare.
 *
 * Locking: Caller holds q->vb_lock.
 */
static int __videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q, &count, &size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err < 0)
		return err;

	/* May have gotten fewer buffers than requested. */
	count = err;

	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q, q->bufs[i], field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q, q->bufs[i]);
	spin_unlock_irqrestore(q->irqlock, flags);
	q->reading = 1;
	return 0;
}
988
/*
 * __videobuf_read_stop - tear down streaming read() I/O
 *
 * Cancels the queue, frees all buffers and clears q->read_buf.
 *
 * Locking: Caller holds q->vb_lock.
 */
static void __videobuf_read_stop(struct videobuf_queue *q)
{
	int i;

	videobuf_queue_cancel(q);
	__videobuf_free(q);
	INIT_LIST_HEAD(&q->stream);
	/* Free any control structures __videobuf_free() left behind. */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}
	q->read_buf = NULL;
}
1004
/*
 * videobuf_read_start - locked wrapper around __videobuf_read_start()
 */
int videobuf_read_start(struct videobuf_queue *q)
{
	int rc;

	videobuf_queue_lock(q);
	rc = __videobuf_read_start(q);
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);
1016
/*
 * videobuf_read_stop - locked wrapper around __videobuf_read_stop()
 */
void videobuf_read_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);
	__videobuf_read_stop(q);
	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);
1024
/*
 * videobuf_stop - stop whichever I/O mode is active
 *
 * Shuts down streaming I/O and/or streaming read() I/O, taking
 * q->vb_lock around both.
 */
void videobuf_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);

	if (q->streaming)
		__videobuf_streamoff(q);

	if (q->reading)
		__videobuf_read_stop(q);

	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);
1038
/*
 * videobuf_read_stream - read() implementation with continuous capture
 *
 * Starts streaming read I/O on first use and then loops: takes the
 * next buffer from the stream list, waits for it, copies its payload
 * out (tracking partial copies in q->read_off) and requeues it once
 * fully consumed.  Mutually exclusive with mmap streaming (-EBUSY).
 * Returns total bytes copied, or a negative error when nothing was
 * copied.
 */
ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
		if (rc < 0) {
			/* Return what we have so far, else the error. */
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval      += rc;
			count       -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
1111
/*
 * videobuf_poll_stream - poll() implementation for videobuf queues
 *
 * Picks the buffer to wait on: the head of the stream list when
 * streaming, otherwise (for read-capable pollers) the current or next
 * read buffer, auto-starting read I/O if necessary.  Reports
 * POLLIN/POLLOUT according to queue direction once that buffer is DONE
 * or in ERROR; POLLERR when no buffer can be determined.
 *
 * NOTE(review): the read path takes list_entry() of q->stream.next
 * without checking list_empty() — this presumably relies on
 * __videobuf_read_start() having queued at least 2 buffers; confirm.
 */
unsigned int videobuf_poll_stream(struct file *file,
				  struct videobuf_queue *q,
				  poll_table *wait)
{
	unsigned long req_events = poll_requested_events(wait);
	struct videobuf_buffer *buf = NULL;
	unsigned int rc = 0;

	videobuf_queue_lock(q);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else if (req_events & (POLLIN | POLLRDNORM)) {
		if (!q->reading)
			__videobuf_read_start(q);
		if (!q->reading) {
			rc = POLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	if (!buf)
		rc = POLLERR;

	if (0 == rc) {
		poll_wait(file, &buf->done, wait);
		if (buf->state == VIDEOBUF_DONE ||
		    buf->state == VIDEOBUF_ERROR) {
			/* Output queues are writable, capture readable. */
			switch (q->type) {
			case V4L2_BUF_TYPE_VIDEO_OUTPUT:
			case V4L2_BUF_TYPE_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
				rc = POLLOUT | POLLWRNORM;
				break;
			default:
				rc = POLLIN | POLLRDNORM;
				break;
			}
		}
	}
	videobuf_queue_unlock(q);
	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);
1162
1163int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
1164{
1165 int rc = -EINVAL;
1166 int i;
1167
1168 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
1169
1170 if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
1171 dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
1172 return -EINVAL;
1173 }
1174
1175 videobuf_queue_lock(q);
1176 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1177 struct videobuf_buffer *buf = q->bufs[i];
1178
1179 if (buf && buf->memory == V4L2_MEMORY_MMAP &&
1180 buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
1181 rc = CALL(q, mmap_mapper, q, buf, vma);
1182 break;
1183 }
1184 }
1185 videobuf_queue_unlock(q);
1186
1187 return rc;
1188}
1189EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
new file mode 100644
index 000000000000..3a43ba0959bf
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -0,0 +1,510 @@
1/*
2 * helper functions for physically contiguous capture buffers
3 *
4 * The functions support hardware lacking scatter gather support
5 * (i.e. the buffers must be linear in physical memory)
6 *
7 * Copyright (c) 2008 Magnus Damm
8 *
9 * Based on videobuf-vmalloc.c,
10 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2
15 */
16
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/mm.h>
20#include <linux/pagemap.h>
21#include <linux/dma-mapping.h>
22#include <linux/sched.h>
23#include <linux/slab.h>
24#include <media/videobuf-dma-contig.h>
25
26struct videobuf_dma_contig_memory {
27 u32 magic;
28 void *vaddr;
29 dma_addr_t dma_handle;
30 bool cached;
31 unsigned long size;
32};
33
34#define MAGIC_DC_MEM 0x0733ac61
35#define MAGIC_CHECK(is, should) \
36 if (unlikely((is) != (should))) { \
37 pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
38 BUG(); \
39 }
40
/*
 * Allocate buffer memory and set up mem->vaddr/mem->dma_handle.
 *
 * Cached buffers use alloc_pages_exact() plus dma_map_single(); the
 * map failure path frees the pages again.  Uncached buffers use a
 * single dma_alloc_coherent() call.  Returns 0 on success, a nonzero
 * error otherwise.
 */
static int __videobuf_dc_alloc(struct device *dev,
			       struct videobuf_dma_contig_memory *mem,
			       unsigned long size, gfp_t flags)
{
	mem->size = size;
	if (mem->cached) {
		mem->vaddr = alloc_pages_exact(mem->size, flags | GFP_DMA);
		if (mem->vaddr) {
			int err;

			mem->dma_handle = dma_map_single(dev, mem->vaddr,
							 mem->size,
							 DMA_FROM_DEVICE);
			err = dma_mapping_error(dev, mem->dma_handle);
			if (err) {
				dev_err(dev, "dma_map_single failed\n");

				free_pages_exact(mem->vaddr, mem->size);
				mem->vaddr = NULL;
				return err;
			}
		}
	} else
		mem->vaddr = dma_alloc_coherent(dev, mem->size,
						&mem->dma_handle, flags);

	/* covers both the cached alloc failure and the coherent one */
	if (!mem->vaddr) {
		dev_err(dev, "memory alloc size %ld failed\n", mem->size);
		return -ENOMEM;
	}

	dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);

	return 0;
}
76
77static void __videobuf_dc_free(struct device *dev,
78 struct videobuf_dma_contig_memory *mem)
79{
80 if (mem->cached) {
81 if (!mem->vaddr)
82 return;
83 dma_unmap_single(dev, mem->dma_handle, mem->size,
84 DMA_FROM_DEVICE);
85 free_pages_exact(mem->vaddr, mem->size);
86 } else
87 dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
88
89 mem->vaddr = NULL;
90}
91
/* mmap open handler: bump the refcount on the shared mapping. */
static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}
101
/*
 * mmap close handler: drop the mapping refcount.  On the last close,
 * cancel streaming if active and free every kernel-allocated buffer
 * that still belongs to this mapping, all under the queue lock.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			/* only touch buffers owned by this mapping */
			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				__videobuf_dc_free(q->dev, mem);
				mem->vaddr = NULL;
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}
}
158
/* VMA callbacks for mappings created by __videobuf_mmap_mapper(). */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};
163
/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer.  No memory is released
 * here: the pages belong to user space; only the recorded dma handle
 * and size are cleared.
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->dma_handle = 0;
	mem->size = 0;
}
175
176/**
177 * videobuf_dma_contig_user_get() - setup user space memory pointer
178 * @mem: per-buffer private videobuf-dma-contig data
179 * @vb: video buffer to map
180 *
181 * This function validates and sets up a pointer to user space memory.
182 * Only physically contiguous pfn-mapped memory is accepted.
183 *
184 * Returns 0 if successful.
185 */
186static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
187 struct videobuf_buffer *vb)
188{
189 struct mm_struct *mm = current->mm;
190 struct vm_area_struct *vma;
191 unsigned long prev_pfn, this_pfn;
192 unsigned long pages_done, user_address;
193 unsigned int offset;
194 int ret;
195
196 offset = vb->baddr & ~PAGE_MASK;
197 mem->size = PAGE_ALIGN(vb->size + offset);
198 ret = -EINVAL;
199
200 down_read(&mm->mmap_sem);
201
202 vma = find_vma(mm, vb->baddr);
203 if (!vma)
204 goto out_up;
205
206 if ((vb->baddr + mem->size) > vma->vm_end)
207 goto out_up;
208
209 pages_done = 0;
210 prev_pfn = 0; /* kill warning */
211 user_address = vb->baddr;
212
213 while (pages_done < (mem->size >> PAGE_SHIFT)) {
214 ret = follow_pfn(vma, user_address, &this_pfn);
215 if (ret)
216 break;
217
218 if (pages_done == 0)
219 mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
220 else if (this_pfn != (prev_pfn + 1))
221 ret = -EFAULT;
222
223 if (ret)
224 break;
225
226 prev_pfn = this_pfn;
227 user_address += PAGE_SIZE;
228 pages_done++;
229 }
230
231out_up:
232 up_read(&current->mm->mmap_sem);
233
234 return ret;
235}
236
237static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
238{
239 struct videobuf_dma_contig_memory *mem;
240 struct videobuf_buffer *vb;
241
242 vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
243 if (vb) {
244 vb->priv = ((char *)vb) + size;
245 mem = vb->priv;
246 mem->magic = MAGIC_DC_MEM;
247 mem->cached = cached;
248 }
249
250 return vb;
251}
252
/* alloc_vb hook for the coherent (uncached) queue type. */
static struct videobuf_buffer *__videobuf_alloc_uncached(size_t size)
{
	return __videobuf_alloc_vb(size, false);
}

/* alloc_vb hook for the cached queue type. */
static struct videobuf_buffer *__videobuf_alloc_cached(size_t size)
{
	return __videobuf_alloc_vb(size, true);
}
262
/* Return the kernel virtual address of the buffer (NULL if not allocated). */
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}
272
/*
 * iolock hook: prepare the buffer memory according to vb->memory.
 *  - MMAP: memory must already have been set up by the mmap handler
 *  - USERPTR: validate a contiguous user pointer, or allocate a
 *    kernel bounce buffer for the read() method when baddr is 0
 *  - OVERLAY / anything else: rejected
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
					GFP_KERNEL))
			return -ENOMEM;
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
		return -EINVAL;
	}

	return 0;
}
312
/*
 * sync hook (cached queues only): make device-written data visible
 * to the CPU before user space reads the buffer.
 */
static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
				DMA_FROM_DEVICE);

	return 0;
}
325
326static int __videobuf_mmap_mapper(struct videobuf_queue *q,
327 struct videobuf_buffer *buf,
328 struct vm_area_struct *vma)
329{
330 struct videobuf_dma_contig_memory *mem;
331 struct videobuf_mapping *map;
332 int retval;
333 unsigned long size;
334 unsigned long pos, start = vma->vm_start;
335 struct page *page;
336
337 dev_dbg(q->dev, "%s\n", __func__);
338
339 /* create mapping + update buffer list */
340 map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
341 if (!map)
342 return -ENOMEM;
343
344 buf->map = map;
345 map->q = q;
346
347 buf->baddr = vma->vm_start;
348
349 mem = buf->priv;
350 BUG_ON(!mem);
351 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
352
353 if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
354 GFP_KERNEL | __GFP_COMP))
355 goto error;
356
357 /* Try to remap memory */
358
359 size = vma->vm_end - vma->vm_start;
360 size = (size < mem->size) ? size : mem->size;
361
362 if (!mem->cached) {
363 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
364 retval = remap_pfn_range(vma, vma->vm_start,
365 mem->dma_handle >> PAGE_SHIFT,
366 size, vma->vm_page_prot);
367 if (retval) {
368 dev_err(q->dev, "mmap: remap failed with error %d. ",
369 retval);
370 dma_free_coherent(q->dev, mem->size,
371 mem->vaddr, mem->dma_handle);
372 goto error;
373 }
374 } else {
375 pos = (unsigned long)mem->vaddr;
376
377 while (size > 0) {
378 page = virt_to_page((void *)pos);
379 if (NULL == page) {
380 dev_err(q->dev, "mmap: virt_to_page failed\n");
381 __videobuf_dc_free(q->dev, mem);
382 goto error;
383 }
384 retval = vm_insert_page(vma, start, page);
385 if (retval) {
386 dev_err(q->dev, "mmap: insert failed with error %d\n",
387 retval);
388 __videobuf_dc_free(q->dev, mem);
389 goto error;
390 }
391 start += PAGE_SIZE;
392 pos += PAGE_SIZE;
393
394 if (size > PAGE_SIZE)
395 size -= PAGE_SIZE;
396 else
397 size = 0;
398 }
399 }
400
401 vma->vm_ops = &videobuf_vm_ops;
402 vma->vm_flags |= VM_DONTEXPAND;
403 vma->vm_private_data = map;
404
405 dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
406 map, q, vma->vm_start, vma->vm_end,
407 (long int)buf->bsize, vma->vm_pgoff, buf->i);
408
409 videobuf_vm_open(vma);
410
411 return 0;
412
413error:
414 kfree(map);
415 return -ENOMEM;
416}
417
/* Queue ops for coherent (uncached) buffers; no sync hook is needed. */
static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,
	.alloc_vb = __videobuf_alloc_uncached,
	.iolock = __videobuf_iolock,
	.mmap_mapper = __videobuf_mmap_mapper,
	.vaddr = __videobuf_to_vaddr,
};

/* Queue ops for cached buffers; ->sync bridges the CPU/device views. */
static struct videobuf_qtype_ops qops_cached = {
	.magic = MAGIC_QTYPE_OPS,
	.alloc_vb = __videobuf_alloc_cached,
	.iolock = __videobuf_iolock,
	.sync = __videobuf_sync,
	.mmap_mapper = __videobuf_mmap_mapper,
	.vaddr = __videobuf_to_vaddr,
};
434
/* Initialize a videobuf queue backed by coherent (uncached) dma-contig memory. */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
449
/* Initialize a videobuf queue backed by cached dma-contig memory. */
void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
					   const struct videobuf_queue_ops *ops,
					   struct device *dev,
					   spinlock_t *irqlock,
					   enum v4l2_buf_type type,
					   enum v4l2_field field,
					   unsigned int msize,
					   void *priv, struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops_cached, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init_cached);
463
/* Return the bus/DMA address of the buffer, for programming the device. */
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
474
/*
 * Release per-buffer memory for a USERPTR buffer: drop the user-space
 * pointer bookkeeping, or free the kernel bounce buffer allocated for
 * the read() method.  MMAP buffers are intentionally left alone.
 */
void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise mmapped region
	   would be released, while still needed. In this case, the memory
	   release should happen inside videobuf_vm_close().
	   So, it should free memory only if the memory were allocated for
	   read() operation.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	if (mem->vaddr) {
		__videobuf_dc_free(q->dev, mem);
		mem->vaddr = NULL;
	}
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
507
508MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
509MODULE_AUTHOR("Magnus Damm");
510MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
new file mode 100644
index 000000000000..f300deafd268
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -0,0 +1,633 @@
1/*
2 * helper functions for SG DMA video4linux capture buffers
3 *
4 * The functions expect the hardware being able to scatter gather
5 * (i.e. the buffers are not linear in physical memory, but fragmented
6 * into PAGE_SIZE chunks). They also assume the driver does not need
7 * to touch the video data.
8 *
9 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
10 *
11 * Highly based on video-buf written originally by:
12 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
13 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
14 * (c) 2006 Ted Walther and John Sokol
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2
19 */
20
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/sched.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27
28#include <linux/dma-mapping.h>
29#include <linux/vmalloc.h>
30#include <linux/pagemap.h>
31#include <linux/scatterlist.h>
32#include <asm/page.h>
33#include <asm/pgtable.h>
34
35#include <media/videobuf-dma-sg.h>
36
37#define MAGIC_DMABUF 0x19721112
38#define MAGIC_SG_MEM 0x17890714
39
40#define MAGIC_CHECK(is, should) \
41 if (unlikely((is) != (should))) { \
42 printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
43 is, should); \
44 BUG(); \
45 }
46
47static int debug;
48module_param(debug, int, 0644);
49
50MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
51MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
52MODULE_LICENSE("GPL");
53
54#define dprintk(level, fmt, arg...) \
55 if (debug >= level) \
56 printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
57
58/* --------------------------------------------------------------------- */
59
/*
 * Return a scatterlist for some page-aligned vmalloc()'ed memory
 * block (NULL on errors).  Memory for the scatterlist is allocated
 * using vzalloc (not kmalloc); the caller must release it with
 * vfree().
 */
static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt,
						  int nr_pages)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = vzalloc(nr_pages * sizeof(*sglist));
	if (NULL == sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);
	for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
		pg = vmalloc_to_page(virt);
		if (NULL == pg)
			goto err;
		/* vmalloc_32() memory must never be in highmem */
		BUG_ON(PageHighMem(pg));
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
	}
	return sglist;

err:
	vfree(sglist);
	return NULL;
}
89
/*
 * Return a scatterlist for a an array of userpages (NULL on errors).
 * Memory for the scatterlist is allocated using vmalloc (not
 * kmalloc); the caller must release it with vfree().  The first
 * entry honours @offset, the total length is capped by @size.
 */
static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
					int nr_pages, int offset, size_t size)
{
	struct scatterlist *sglist;
	int i;

	if (NULL == pages[0])
		return NULL;
	sglist = vmalloc(nr_pages * sizeof(*sglist));
	if (NULL == sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);

	if (PageHighMem(pages[0]))
		/* DMA to highmem pages might not work */
		goto highmem;
	sg_set_page(&sglist[0], pages[0],
			min_t(size_t, PAGE_SIZE - offset, size), offset);
	size -= min_t(size_t, PAGE_SIZE - offset, size);
	for (i = 1; i < nr_pages; i++) {
		if (NULL == pages[i])
			goto nopage;
		if (PageHighMem(pages[i]))
			goto highmem;
		sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return sglist;

nopage:
	dprintk(2, "sgl: oops - no page\n");
	vfree(sglist);
	return NULL;

highmem:
	dprintk(2, "sgl: oops - highmem page\n");
	vfree(sglist);
	return NULL;
}
134
135/* --------------------------------------------------------------------- */
136
/* Return the embedded videobuf_dmabuf of an sg-backed buffer. */
struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
{
	struct videobuf_dma_sg_memory *mem = buf->priv;
	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

	return &mem->dma;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma);
147
/* Zero the dmabuf and stamp it with the MAGIC_DMABUF cookie. */
void videobuf_dma_init(struct videobuf_dmabuf *dma)
{
	memset(dma, 0, sizeof(*dma));
	dma->magic = MAGIC_DMABUF;
}
EXPORT_SYMBOL_GPL(videobuf_dma_init);
154
155static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
156 int direction, unsigned long data, unsigned long size)
157{
158 unsigned long first, last;
159 int err, rw = 0;
160
161 dma->direction = direction;
162 switch (dma->direction) {
163 case DMA_FROM_DEVICE:
164 rw = READ;
165 break;
166 case DMA_TO_DEVICE:
167 rw = WRITE;
168 break;
169 default:
170 BUG();
171 }
172
173 first = (data & PAGE_MASK) >> PAGE_SHIFT;
174 last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
175 dma->offset = data & ~PAGE_MASK;
176 dma->size = size;
177 dma->nr_pages = last-first+1;
178 dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
179 if (NULL == dma->pages)
180 return -ENOMEM;
181
182 dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
183 data, size, dma->nr_pages);
184
185 err = get_user_pages(current, current->mm,
186 data & PAGE_MASK, dma->nr_pages,
187 rw == READ, 1, /* force */
188 dma->pages, NULL);
189
190 if (err != dma->nr_pages) {
191 dma->nr_pages = (err >= 0) ? err : 0;
192 dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
193 return err < 0 ? err : -EINVAL;
194 }
195 return 0;
196}
197
/* Locked wrapper: take mmap_sem for read around the user-pointer pin. */
int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
			   unsigned long data, unsigned long size)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = videobuf_dma_init_user_locked(dma, direction, data, size);
	up_read(&current->mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
210
/*
 * Allocate a zeroed kernel bounce buffer of nr_pages via vmalloc_32()
 * (32-bit DMA-able memory).  Returns 0 or -ENOMEM.
 */
int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
			     int nr_pages)
{
	dprintk(1, "init kernel [%d pages]\n", nr_pages);

	dma->direction = direction;
	dma->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
	if (NULL == dma->vaddr) {
		dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
		return -ENOMEM;
	}

	dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
		(unsigned long)dma->vaddr,
		nr_pages << PAGE_SHIFT);

	memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
	dma->nr_pages = nr_pages;

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
233
/*
 * Describe an overlay (framebuffer) region by its bus address.
 * Note: dma->direction is set before the addr check, so it is
 * modified even when -EINVAL is returned (kept for compatibility).
 */
int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
			      dma_addr_t addr, int nr_pages)
{
	dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n",
		nr_pages, (unsigned long)addr);
	dma->direction = direction;

	if (0 == addr)
		return -EINVAL;

	dma->bus_addr = addr;
	dma->nr_pages = nr_pages;

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
250
/*
 * Build the scatterlist from whichever source the dmabuf was
 * initialized with (user pages, kernel vmalloc area, or a raw bus
 * address) and, except for the bus-address case, map it for DMA.
 * The three sources are mutually exclusive by construction.
 */
int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
	BUG_ON(0 == dma->nr_pages);

	if (dma->pages) {
		dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
						   dma->offset, dma->size);
	}
	if (dma->vaddr) {
		dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
						     dma->nr_pages);
	}
	if (dma->bus_addr) {
		/* overlay: single hand-built entry, no dma_map_sg() */
		dma->sglist = vmalloc(sizeof(*dma->sglist));
		if (NULL != dma->sglist) {
			dma->sglen = 1;
			sg_dma_address(&dma->sglist[0])	= dma->bus_addr
							& PAGE_MASK;
			dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
			sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
		}
	}
	if (NULL == dma->sglist) {
		dprintk(1, "scatterlist is NULL\n");
		return -ENOMEM;
	}
	if (!dma->bus_addr) {
		dma->sglen = dma_map_sg(dev, dma->sglist,
					dma->nr_pages, dma->direction);
		if (0 == dma->sglen) {
			printk(KERN_WARNING
			       "%s: videobuf_map_sg failed\n", __func__);
			vfree(dma->sglist);
			dma->sglist = NULL;
			dma->sglen = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_dma_map);
294
/*
 * Undo videobuf_dma_map(): unmap and free the scatterlist.
 * A dmabuf that was never mapped (sglen == 0) is a no-op.
 */
int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);

	if (!dma->sglen)
		return 0;

	dma_unmap_sg(dev, dma->sglist, dma->sglen, dma->direction);

	vfree(dma->sglist);
	dma->sglist = NULL;
	dma->sglen = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
311
312int videobuf_dma_free(struct videobuf_dmabuf *dma)
313{
314 int i;
315 MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
316 BUG_ON(dma->sglen);
317
318 if (dma->pages) {
319 for (i = 0; i < dma->nr_pages; i++)
320 page_cache_release(dma->pages[i]);
321 kfree(dma->pages);
322 dma->pages = NULL;
323 }
324
325 vfree(dma->vaddr);
326 dma->vaddr = NULL;
327
328 if (dma->bus_addr)
329 dma->bus_addr = 0;
330 dma->direction = DMA_NONE;
331
332 return 0;
333}
334EXPORT_SYMBOL_GPL(videobuf_dma_free);
335
336/* --------------------------------------------------------------------- */
337
/* mmap open handler: bump the refcount on the shared mapping. */
static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
		map->count, vma->vm_start, vma->vm_end);

	map->count++;
}
347
348static void videobuf_vm_close(struct vm_area_struct *vma)
349{
350 struct videobuf_mapping *map = vma->vm_private_data;
351 struct videobuf_queue *q = map->q;
352 struct videobuf_dma_sg_memory *mem;
353 int i;
354
355 dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
356 map->count, vma->vm_start, vma->vm_end);
357
358 map->count--;
359 if (0 == map->count) {
360 dprintk(1, "munmap %p q=%p\n", map, q);
361 videobuf_queue_lock(q);
362 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
363 if (NULL == q->bufs[i])
364 continue;
365 mem = q->bufs[i]->priv;
366 if (!mem)
367 continue;
368
369 MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
370
371 if (q->bufs[i]->map != map)
372 continue;
373 q->bufs[i]->map = NULL;
374 q->bufs[i]->baddr = 0;
375 q->ops->buf_release(q, q->bufs[i]);
376 }
377 videobuf_queue_unlock(q);
378 kfree(map);
379 }
380 return;
381}
382
/*
 * Get a anonymous page for the mapping. Make sure we can DMA to that
 * memory location with 32bit PCI devices (i.e. don't use highmem for
 * now ...). Bounce buffers don't work very well for the data rates
 * video capture has.
 */
static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
		(unsigned long)vmf->virtual_address,
		vma->vm_start, vma->vm_end);

	/* GFP_USER | __GFP_DMA32 keeps the page out of highmem */
	page = alloc_page(GFP_USER | __GFP_DMA32);
	if (!page)
		return VM_FAULT_OOM;
	clear_user_highpage(page, (unsigned long)vmf->virtual_address);
	vmf->page = page;

	return 0;
}
405
/* VMA callbacks for sg-backed mappings; pages are faulted in lazily. */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
	.fault = videobuf_vm_fault,
};
411
/* ---------------------------------------------------------------------
 * SG handlers for the generic methods
 */

/* Allocated area consists on 3 parts:
	struct video_buffer
	struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
	struct videobuf_dma_sg_memory
 */

/* Single kzalloc for buffer + driver data + sg private struct;
 * ->priv points at the videobuf_dma_sg_memory tail. */
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
	struct videobuf_dma_sg_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (!vb)
		return vb;

	mem = vb->priv = ((char *)vb) + size;
	mem->magic = MAGIC_SG_MEM;

	videobuf_dma_init(&mem->dma);

	dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
		__func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
		mem, (long)sizeof(*mem));

	return vb;
}
442
/* Return the kernel virtual address of the bounce buffer (may be NULL). */
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_sg_memory *mem = buf->priv;
	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

	return mem->dma.vaddr;
}
452
/*
 * iolock hook: initialize and map the dmabuf according to vb->memory:
 *  - MMAP, or USERPTR without an address: kernel bounce buffer
 *  - USERPTR with an address: pin user pages (locked or unlocked
 *    variant, see the HACK note below)
 *  - OVERLAY: describe the framebuffer region by bus address
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	int err, pages;
	dma_addr_t bus;
	struct videobuf_dma_sg_memory *mem = vb->priv;
	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
	case V4L2_MEMORY_USERPTR:
		if (0 == vb->baddr) {
			/* no userspace addr -- kernel bounce buffer */
			pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
			err = videobuf_dma_init_kernel(&mem->dma,
						       DMA_FROM_DEVICE,
						       pages);
			if (0 != err)
				return err;
		} else if (vb->memory == V4L2_MEMORY_USERPTR) {
			/* dma directly to userspace */
			err = videobuf_dma_init_user(&mem->dma,
						     DMA_FROM_DEVICE,
						     vb->baddr, vb->bsize);
			if (0 != err)
				return err;
		} else {
			/* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
			buffers can only be called from videobuf_qbuf
			we take current->mm->mmap_sem there, to prevent
			locking inversion, so don't take it here */

			err = videobuf_dma_init_user_locked(&mem->dma,
						      DMA_FROM_DEVICE,
						      vb->baddr, vb->bsize);
			if (0 != err)
				return err;
		}
		break;
	case V4L2_MEMORY_OVERLAY:
		if (NULL == fbuf)
			return -EINVAL;
		/* FIXME: need sanity checks for vb->boff */
		/*
		 * Using a double cast to avoid compiler warnings when
		 * building for PAE. Compiler doesn't like direct casting
		 * of a 32 bit ptr to 64 bit integer.
		 */
		bus = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
		pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
		err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
						bus, pages);
		if (0 != err)
			return err;
		break;
	default:
		BUG();
	}
	/* build the scatterlist and map it for the device */
	err = videobuf_dma_map(q->dev, &mem->dma);
	if (0 != err)
		return err;

	return 0;
}
520
/* sync hook: hand DMA-written data back to the CPU before user access. */
static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
	struct videobuf_dma_sg_memory *mem = buf->priv;
	BUG_ON(!mem || !mem->dma.sglen);

	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
	MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);

	dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
			    mem->dma.sglen, mem->dma.direction);

	return 0;
}
535
/*
 * mmap_mapper hook: attach a videobuf_mapping to the buffer matching
 * this VMA.  Pages are NOT mapped here; they are faulted in lazily by
 * videobuf_vm_fault().  Note that first == last, so the loop below
 * covers exactly one buffer (kept in this shape from the historical
 * multi-buffer mapping code).
 */
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_sg_memory *mem = buf->priv;
	struct videobuf_mapping *map;
	unsigned int first, last, size = 0, i;
	int retval;

	retval = -EINVAL;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (buf == q->bufs[first]) {
			size = PAGE_ALIGN(q->bufs[first]->bsize);
			break;
		}
	}

	/* paranoia, should never happen since buf is always valid. */
	if (!size) {
		dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
			(vma->vm_pgoff << PAGE_SHIFT));
		goto done;
	}

	last = first;

	/* create mapping + update buffer list */
	retval = -ENOMEM;
	map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (NULL == map)
		goto done;

	size = 0;
	for (i = first; i <= last; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->bufs[i]->map = map;
		q->bufs[i]->baddr = vma->vm_start + size;
		size += PAGE_ALIGN(q->bufs[i]->bsize);
	}

	map->count = 1;
	map->q = q;
	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
	vma->vm_private_data = map;
	dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
		map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
	retval = 0;

done:
	return retval;
}
595
/* Queue-type operations for scatter-gather capable hardware. */
static struct videobuf_qtype_ops sg_ops = {
	.magic = MAGIC_QTYPE_OPS,

	.alloc_vb = __videobuf_alloc_vb,
	.iolock = __videobuf_iolock,
	.sync = __videobuf_sync,
	.mmap_mapper = __videobuf_mmap_mapper,
	.vaddr = __videobuf_to_vaddr,
};
605
/*
 * Allocate a videobuf_buffer via the sg allocator without a real queue.
 * NOTE(review): only int_ops and msize of the on-stack queue are set;
 * this assumes videobuf_alloc_vb() reads nothing else from q — verify
 * against videobuf-core before changing.
 */
void *videobuf_sg_alloc(size_t size)
{
	struct videobuf_queue q;

	/* Required to make generic handler to call __videobuf_alloc */
	q.int_ops = &sg_ops;

	q.msize = size;

	return videobuf_alloc_vb(&q);
}
EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
618
/* Initialize a videobuf queue backed by the scatter-gather allocator. */
void videobuf_queue_sg_init(struct videobuf_queue *q,
			    const struct videobuf_queue_ops *ops,
			    struct device *dev,
			    spinlock_t *irqlock,
			    enum v4l2_buf_type type,
			    enum v4l2_field field,
			    unsigned int msize,
			    void *priv,
			    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &sg_ops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
633
diff --git a/drivers/media/v4l2-core/videobuf-dvb.c b/drivers/media/v4l2-core/videobuf-dvb.c
new file mode 100644
index 000000000000..b7efa4516d36
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf-dvb.c
@@ -0,0 +1,398 @@
1/*
2 *
3 * some helper function for simple DVB cards which simply DMA the
4 * complete transport stream and let the computer sort everything else
5 * (i.e. we are using the software demux, ...). Also uses the
6 * video-buf to manage DMA buffers.
7 *
8 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/device.h>
19#include <linux/fs.h>
20#include <linux/kthread.h>
21#include <linux/file.h>
22#include <linux/slab.h>
23
24#include <linux/freezer.h>
25
26#include <media/videobuf-core.h>
27#include <media/videobuf-dvb.h>
28
29/* ------------------------------------------------------------------ */
30
31MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
32MODULE_LICENSE("GPL");
33
34static unsigned int debug;
35module_param(debug, int, 0644);
36MODULE_PARM_DESC(debug,"enable debug messages");
37
/*
 * Debug printk gated on the "debug" module parameter.  Wrapped in
 * do { } while (0) so it expands to a single statement and cannot
 * swallow an "else" when used unbraced inside an if.  Expects a
 * "dvb" pointer to be in scope at the call site.
 */
#define dprintk(fmt, arg...) \
	do { \
		if (debug) \
			printk(KERN_DEBUG "%s/dvb: " fmt, dvb->name , ## arg); \
	} while (0)
40
41/* ------------------------------------------------------------------ */
42
/*
 * Per-adapter kernel thread: pulls filled buffers off the videobuf
 * stream queue, feeds their payload into the software demux, and
 * requeues them for further DMA.  Runs from the first start_feed()
 * until the last feed is stopped (nfeeds == 0) or kthread_stop().
 */
static int videobuf_dvb_thread(void *data)
{
	struct videobuf_dvb *dvb = data;
	struct videobuf_buffer *buf;
	unsigned long flags;
	void *outp;

	dprintk("dvb thread started\n");
	set_freezable();
	videobuf_read_start(&dvb->dvbq);

	for (;;) {
		/* fetch next buffer */
		buf = list_entry(dvb->dvbq.stream.next,
				 struct videobuf_buffer, stream);
		list_del(&buf->stream);
		/* block until the DMA into this buffer has completed */
		videobuf_waiton(&dvb->dvbq, buf, 0, 1);

		/* no more feeds left or stop_feed() asked us to quit */
		if (0 == dvb->nfeeds)
			break;
		if (kthread_should_stop())
			break;
		try_to_freeze();

		/* feed buffer data to demux */
		outp = videobuf_queue_to_vaddr(&dvb->dvbq, buf);

		/* forward only buffers that completed without error */
		if (buf->state == VIDEOBUF_DONE)
			dvb_dmx_swfilter(&dvb->demux, outp,
					 buf->size);

		/* requeue buffer */
		list_add_tail(&buf->stream,&dvb->dvbq.stream);
		spin_lock_irqsave(dvb->dvbq.irqlock,flags);
		dvb->dvbq.ops->buf_queue(&dvb->dvbq,buf);
		spin_unlock_irqrestore(dvb->dvbq.irqlock,flags);
	}

	videobuf_read_stop(&dvb->dvbq);
	dprintk("dvb thread stopped\n");

	/* Hmm, linux becomes *very* unhappy without this ... */
	/* park until kthread_stop() reaps us, as kthread API requires */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}
92
93static int videobuf_dvb_start_feed(struct dvb_demux_feed *feed)
94{
95 struct dvb_demux *demux = feed->demux;
96 struct videobuf_dvb *dvb = demux->priv;
97 int rc;
98
99 if (!demux->dmx.frontend)
100 return -EINVAL;
101
102 mutex_lock(&dvb->lock);
103 dvb->nfeeds++;
104 rc = dvb->nfeeds;
105
106 if (NULL != dvb->thread)
107 goto out;
108 dvb->thread = kthread_run(videobuf_dvb_thread,
109 dvb, "%s dvb", dvb->name);
110 if (IS_ERR(dvb->thread)) {
111 rc = PTR_ERR(dvb->thread);
112 dvb->thread = NULL;
113 }
114
115out:
116 mutex_unlock(&dvb->lock);
117 return rc;
118}
119
120static int videobuf_dvb_stop_feed(struct dvb_demux_feed *feed)
121{
122 struct dvb_demux *demux = feed->demux;
123 struct videobuf_dvb *dvb = demux->priv;
124 int err = 0;
125
126 mutex_lock(&dvb->lock);
127 dvb->nfeeds--;
128 if (0 == dvb->nfeeds && NULL != dvb->thread) {
129 err = kthread_stop(dvb->thread);
130 dvb->thread = NULL;
131 }
132 mutex_unlock(&dvb->lock);
133 return err;
134}
135
/*
 * Register one DVB adapter for this frontend collection and stash the
 * bus-private pointer / multi-frontend-sharing flag on it.
 * Returns the (non-negative) adapter number or a negative errno.
 *
 * NOTE(review): on dvb_register_adapter() failure only a warning is
 * printed; adapter.priv/mfe_shared are still written and the negative
 * result is handed back — the caller is expected to bail out on it.
 */
static int videobuf_dvb_register_adapter(struct videobuf_dvb_frontends *fe,
			  struct module *module,
			  void *adapter_priv,
			  struct device *device,
			  char *adapter_name,
			  short *adapter_nr,
			  int mfe_shared)
{
	int result;

	mutex_init(&fe->lock);

	/* register adapter */
	result = dvb_register_adapter(&fe->adapter, adapter_name, module,
		device, adapter_nr);
	if (result < 0) {
		printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
		       adapter_name, result);
	}
	fe->adapter.priv = adapter_priv;
	fe->adapter.mfe_shared = mfe_shared;

	return result;
}
160
/*
 * Register everything hanging off one frontend on the given adapter:
 * the frontend itself, the software demux, the demux device node, the
 * hardware and memory demux frontends, and the DVB network interface.
 * On any failure the pieces registered so far are unwound in reverse
 * order via the goto chain below.  Returns 0 or a negative errno.
 */
static int videobuf_dvb_register_frontend(struct dvb_adapter *adapter,
	struct videobuf_dvb *dvb)
{
	int result;

	/* register frontend */
	result = dvb_register_frontend(adapter, dvb->frontend);
	if (result < 0) {
		printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
		       dvb->name, result);
		goto fail_frontend;
	}

	/* register demux stuff */
	dvb->demux.dmx.capabilities =
		DMX_TS_FILTERING | DMX_SECTION_FILTERING |
		DMX_MEMORY_BASED_FILTERING;
	dvb->demux.priv       = dvb;
	dvb->demux.filternum  = 256;
	dvb->demux.feednum    = 256;
	dvb->demux.start_feed = videobuf_dvb_start_feed;
	dvb->demux.stop_feed  = videobuf_dvb_stop_feed;
	result = dvb_dmx_init(&dvb->demux);
	if (result < 0) {
		printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
		       dvb->name, result);
		goto fail_dmx;
	}

	dvb->dmxdev.filternum    = 256;
	dvb->dmxdev.demux        = &dvb->demux.dmx;
	dvb->dmxdev.capabilities = 0;
	result = dvb_dmxdev_init(&dvb->dmxdev, adapter);

	if (result < 0) {
		printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
		       dvb->name, result);
		goto fail_dmxdev;
	}

	/* hardware demux input (the DMA'd transport stream) */
	dvb->fe_hw.source = DMX_FRONTEND_0;
	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	if (result < 0) {
		printk(KERN_WARNING "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
		       dvb->name, result);
		goto fail_fe_hw;
	}

	/* memory-based demux input (software injection) */
	dvb->fe_mem.source = DMX_MEMORY_FE;
	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
	if (result < 0) {
		printk(KERN_WARNING "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
		       dvb->name, result);
		goto fail_fe_mem;
	}

	/* default to the hardware input */
	result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	if (result < 0) {
		printk(KERN_WARNING "%s: connect_frontend failed (errno = %d)\n",
		       dvb->name, result);
		goto fail_fe_conn;
	}

	/* register network adapter */
	result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
	if (result < 0) {
		printk(KERN_WARNING "%s: dvb_net_init failed (errno = %d)\n",
		       dvb->name, result);
		goto fail_fe_conn;
	}
	return 0;

	/* unwind in strict reverse order of registration */
fail_fe_conn:
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
fail_fe_mem:
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
fail_fe_hw:
	dvb_dmxdev_release(&dvb->dmxdev);
fail_dmxdev:
	dvb_dmx_release(&dvb->demux);
fail_dmx:
	dvb_unregister_frontend(dvb->frontend);
fail_frontend:
	dvb_frontend_detach(dvb->frontend);
	dvb->frontend = NULL;

	return result;
}
249
250/* ------------------------------------------------------------------ */
251/* Register a single adapter and one or more frontends */
252int videobuf_dvb_register_bus(struct videobuf_dvb_frontends *f,
253 struct module *module,
254 void *adapter_priv,
255 struct device *device,
256 short *adapter_nr,
257 int mfe_shared)
258{
259 struct list_head *list, *q;
260 struct videobuf_dvb_frontend *fe;
261 int res;
262
263 fe = videobuf_dvb_get_frontend(f, 1);
264 if (!fe) {
265 printk(KERN_WARNING "Unable to register the adapter which has no frontends\n");
266 return -EINVAL;
267 }
268
269 /* Bring up the adapter */
270 res = videobuf_dvb_register_adapter(f, module, adapter_priv, device,
271 fe->dvb.name, adapter_nr, mfe_shared);
272 if (res < 0) {
273 printk(KERN_WARNING "videobuf_dvb_register_adapter failed (errno = %d)\n", res);
274 return res;
275 }
276
277 /* Attach all of the frontends to the adapter */
278 mutex_lock(&f->lock);
279 list_for_each_safe(list, q, &f->felist) {
280 fe = list_entry(list, struct videobuf_dvb_frontend, felist);
281 res = videobuf_dvb_register_frontend(&f->adapter, &fe->dvb);
282 if (res < 0) {
283 printk(KERN_WARNING "%s: videobuf_dvb_register_frontend failed (errno = %d)\n",
284 fe->dvb.name, res);
285 goto err;
286 }
287 }
288 mutex_unlock(&f->lock);
289 return 0;
290
291err:
292 mutex_unlock(&f->lock);
293 videobuf_dvb_unregister_bus(f);
294 return res;
295}
296EXPORT_SYMBOL(videobuf_dvb_register_bus);
297
/*
 * Tear down everything videobuf_dvb_register_bus() set up: release all
 * frontends (with their demux/dmxdev/net devices) and unregister the
 * adapter itself.
 */
void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f)
{
	videobuf_dvb_dealloc_frontends(f);

	dvb_unregister_adapter(&f->adapter);
}
EXPORT_SYMBOL(videobuf_dvb_unregister_bus);
305
306struct videobuf_dvb_frontend *videobuf_dvb_get_frontend(
307 struct videobuf_dvb_frontends *f, int id)
308{
309 struct list_head *list, *q;
310 struct videobuf_dvb_frontend *fe, *ret = NULL;
311
312 mutex_lock(&f->lock);
313
314 list_for_each_safe(list, q, &f->felist) {
315 fe = list_entry(list, struct videobuf_dvb_frontend, felist);
316 if (fe->id == id) {
317 ret = fe;
318 break;
319 }
320 }
321
322 mutex_unlock(&f->lock);
323
324 return ret;
325}
326EXPORT_SYMBOL(videobuf_dvb_get_frontend);
327
328int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f,
329 struct dvb_frontend *p)
330{
331 struct list_head *list, *q;
332 struct videobuf_dvb_frontend *fe = NULL;
333 int ret = 0;
334
335 mutex_lock(&f->lock);
336
337 list_for_each_safe(list, q, &f->felist) {
338 fe = list_entry(list, struct videobuf_dvb_frontend, felist);
339 if (fe->dvb.frontend == p) {
340 ret = fe->id;
341 break;
342 }
343 }
344
345 mutex_unlock(&f->lock);
346
347 return ret;
348}
349EXPORT_SYMBOL(videobuf_dvb_find_frontend);
350
351struct videobuf_dvb_frontend *videobuf_dvb_alloc_frontend(
352 struct videobuf_dvb_frontends *f, int id)
353{
354 struct videobuf_dvb_frontend *fe;
355
356 fe = kzalloc(sizeof(struct videobuf_dvb_frontend), GFP_KERNEL);
357 if (fe == NULL)
358 goto fail_alloc;
359
360 fe->id = id;
361 mutex_init(&fe->dvb.lock);
362
363 mutex_lock(&f->lock);
364 list_add_tail(&fe->felist, &f->felist);
365 mutex_unlock(&f->lock);
366
367fail_alloc:
368 return fe;
369}
370EXPORT_SYMBOL(videobuf_dvb_alloc_frontend);
371
/*
 * Release every frontend on @f->felist.  For entries whose DVB net
 * device came up (i.e. videobuf_dvb_register_frontend() succeeded) the
 * net/demux/dmxdev registrations are unwound first; the dvb_frontend
 * is detached in either case.  Entries are unlinked and freed, leaving
 * the list empty.  The _safe iterator is required here because entries
 * are deleted while walking.
 */
void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f)
{
	struct list_head *list, *q;
	struct videobuf_dvb_frontend *fe;

	mutex_lock(&f->lock);
	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct videobuf_dvb_frontend, felist);
		if (fe->dvb.net.dvbdev) {
			dvb_net_release(&fe->dvb.net);
			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
				&fe->dvb.fe_mem);
			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
				&fe->dvb.fe_hw);
			dvb_dmxdev_release(&fe->dvb.dmxdev);
			dvb_dmx_release(&fe->dvb.demux);
			dvb_unregister_frontend(fe->dvb.frontend);
		}
		if (fe->dvb.frontend)
			/* always allocated, may have been reset */
			dvb_frontend_detach(fe->dvb.frontend);
		list_del(list); /* remove list entry */
		kfree(fe);	/* free frontend allocation */
	}
	mutex_unlock(&f->lock);
}
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
new file mode 100644
index 000000000000..df142580e44c
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -0,0 +1,349 @@
1/*
2 * helper functions for vmalloc video4linux capture buffers
3 *
4 * The functions expect the hardware being able to scatter gather
5 * (i.e. the buffers are not linear in physical memory, but fragmented
6 * into PAGE_SIZE chunks). They also assume the driver does not need
7 * to touch the video data.
8 *
9 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <linux/slab.h>
20#include <linux/interrupt.h>
21
22#include <linux/pci.h>
23#include <linux/vmalloc.h>
24#include <linux/pagemap.h>
25#include <asm/page.h>
26#include <asm/pgtable.h>
27
28#include <media/videobuf-vmalloc.h>
29
30#define MAGIC_DMABUF 0x17760309
31#define MAGIC_VMAL_MEM 0x18221223
32
/*
 * Verify a structure's magic cookie and BUG() on mismatch.  Wrapped in
 * do { } while (0) so the check expands to exactly one statement and is
 * safe inside an unbraced if/else at the call site.
 */
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
					is, should);			\
			BUG();						\
		}							\
	} while (0)
39
40static int debug;
41module_param(debug, int, 0644);
42
43MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
44MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
45MODULE_LICENSE("GPL");
46
/*
 * Level-gated debug printk.  Wrapped in do { } while (0) so the macro
 * behaves as a single statement and cannot swallow an "else" when used
 * unbraced inside an if.
 */
#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg); \
	} while (0)
50
51
52/***************************************************************************/
53
54static void videobuf_vm_open(struct vm_area_struct *vma)
55{
56 struct videobuf_mapping *map = vma->vm_private_data;
57
58 dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
59 map->count, vma->vm_start, vma->vm_end);
60
61 map->count++;
62}
63
/*
 * mmap close handler: drop one reference on the buffer mapping.  When
 * the last reference goes away, cancel any running streaming, free the
 * vmalloc'ed backing store of every buffer tied to this mapping, and
 * release the mapping itself.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
		map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_vmalloc_memory *mem;

		dprintk(1, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			/* only touch buffers that belong to this mapping */
			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dprintk(1, "%s: buf[%d] freeing (%p)\n",
					__func__, i, mem->vaddr);

				vfree(mem->vaddr);
				mem->vaddr = NULL;
			}

			/* detach the buffer from the now-dead mapping */
			q->bufs[i]->map   = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}

	return;
}
122
/* VMA callbacks used for mmap'ed videobuf buffers (refcounting only). */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open     = videobuf_vm_open,
	.close    = videobuf_vm_close,
};
127
128/* ---------------------------------------------------------------------
129 * vmalloc handlers for the generic methods
130 */
131
/* The allocated area consists of 3 parts:
	struct videobuf_buffer
	struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
	struct videobuf_vmalloc_memory
 */
137
138static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
139{
140 struct videobuf_vmalloc_memory *mem;
141 struct videobuf_buffer *vb;
142
143 vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
144 if (!vb)
145 return vb;
146
147 mem = vb->priv = ((char *)vb) + size;
148 mem->magic = MAGIC_VMAL_MEM;
149
150 dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
151 __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
152 mem, (long)sizeof(*mem));
153
154 return vb;
155}
156
/*
 * Lock down the backing memory for one buffer before I/O.  For MMAP
 * buffers the memory must already have been set up by
 * __videobuf_mmap_mapper(); for USERPTR only the kernel-allocated
 * read() variant (baddr == 0) is supported; OVERLAY is unsupported.
 * Returns 0 or a negative errno.
 *
 * NOTE(review): despite its name, "pages" holds a page-aligned *byte*
 * count — vmalloc_user() takes a size in bytes.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_vmalloc_memory *mem = vb->priv;
	int pages;

	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dprintk(1, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			printk(KERN_ERR "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		pages = PAGE_ALIGN(vb->size);

		dprintk(1, "%s memory method USERPTR\n", __func__);

		/* a real user-supplied pointer is not supported */
		if (vb->baddr) {
			printk(KERN_ERR "USERPTR is currently not supported\n");
			return -EINVAL;
		}

		/* The only USERPTR currently supported is the one needed for
		 * read() method.
		 */

		mem->vaddr = vmalloc_user(pages);
		if (!mem->vaddr) {
			printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
			return -ENOMEM;
		}
		dprintk(1, "vmalloc is at addr %p (%d pages)\n",
			mem->vaddr, pages);

#if 0
		int rc;
		/* Kernel userptr is used also by read() method. In this case,
		   there's no need to remap, since data will be copied to user
		 */
		if (!vb->baddr)
			return 0;

		/* FIXME: to properly support USERPTR, remap should occur.
		   The code below won't work, since mem->vma = NULL
		 */
		/* Try to remap memory */
		rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
		if (rc < 0) {
			printk(KERN_ERR "mmap: remap failed with error %d", rc);
			return -ENOMEM;
		}
#endif

		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);

		/* Currently, doesn't support V4L2_MEMORY_OVERLAY */
		printk(KERN_ERR "Memory method currently unsupported.\n");
		return -EINVAL;
	}

	return 0;
}
231
232static int __videobuf_mmap_mapper(struct videobuf_queue *q,
233 struct videobuf_buffer *buf,
234 struct vm_area_struct *vma)
235{
236 struct videobuf_vmalloc_memory *mem;
237 struct videobuf_mapping *map;
238 int retval, pages;
239
240 dprintk(1, "%s\n", __func__);
241
242 /* create mapping + update buffer list */
243 map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
244 if (NULL == map)
245 return -ENOMEM;
246
247 buf->map = map;
248 map->q = q;
249
250 buf->baddr = vma->vm_start;
251
252 mem = buf->priv;
253 BUG_ON(!mem);
254 MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
255
256 pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
257 mem->vaddr = vmalloc_user(pages);
258 if (!mem->vaddr) {
259 printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
260 goto error;
261 }
262 dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages);
263
264 /* Try to remap memory */
265 retval = remap_vmalloc_range(vma, mem->vaddr, 0);
266 if (retval < 0) {
267 printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
268 vfree(mem->vaddr);
269 goto error;
270 }
271
272 vma->vm_ops = &videobuf_vm_ops;
273 vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
274 vma->vm_private_data = map;
275
276 dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
277 map, q, vma->vm_start, vma->vm_end,
278 (long int)buf->bsize,
279 vma->vm_pgoff, buf->i);
280
281 videobuf_vm_open(vma);
282
283 return 0;
284
285error:
286 mem = NULL;
287 kfree(map);
288 return -ENOMEM;
289}
290
/*
 * Method table plugging the vmalloc implementation into the generic
 * videobuf core; installed as q->int_ops by videobuf_queue_vmalloc_init().
 */
static struct videobuf_qtype_ops qops = {
	.magic        = MAGIC_QTYPE_OPS,

	.alloc_vb     = __videobuf_alloc_vb,
	.iolock       = __videobuf_iolock,
	.mmap_mapper  = __videobuf_mmap_mapper,
	.vaddr        = videobuf_to_vmalloc,
};
299
/*
 * Initialize a videobuf queue backed by vmalloc'ed memory.  Thin
 * wrapper around videobuf_queue_core_init() that supplies the vmalloc
 * method table; all parameters are forwarded unchanged.
 */
void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
			 const struct videobuf_queue_ops *ops,
			 struct device *dev,
			 spinlock_t *irqlock,
			 enum v4l2_buf_type type,
			 enum v4l2_field field,
			 unsigned int msize,
			 void *priv,
			 struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
314
/*
 * Return the kernel virtual address of a buffer's vmalloc'ed backing
 * store (may be NULL if no memory has been allocated yet).  BUGs if
 * the buffer carries no vmalloc bookkeeping.
 */
void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
	struct videobuf_vmalloc_memory *mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	return mem->vaddr;
}
EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
324
325void videobuf_vmalloc_free(struct videobuf_buffer *buf)
326{
327 struct videobuf_vmalloc_memory *mem = buf->priv;
328
329 /* mmapped memory can't be freed here, otherwise mmapped region
330 would be released, while still needed. In this case, the memory
331 release should happen inside videobuf_vm_close().
332 So, it should free memory only if the memory were allocated for
333 read() operation.
334 */
335 if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
336 return;
337
338 if (!mem)
339 return;
340
341 MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
342
343 vfree(mem->vaddr);
344 mem->vaddr = NULL;
345
346 return;
347}
348EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
349
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
new file mode 100644
index 000000000000..4da3df61901f
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -0,0 +1,2380 @@
1/*
2 * videobuf2-core.c - V4L2 driver helper framework
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Pawel Osciak <pawel@osciak.com>
7 * Marek Szyprowski <m.szyprowski@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mm.h>
18#include <linux/poll.h>
19#include <linux/slab.h>
20#include <linux/sched.h>
21
22#include <media/v4l2-dev.h>
23#include <media/v4l2-fh.h>
24#include <media/v4l2-event.h>
25#include <media/videobuf2-core.h>
26
27static int debug;
28module_param(debug, int, 0644);
29
/* Level-gated debug printk, controlled by the "debug" module parameter. */
#define dprintk(level, fmt, arg...) \
	do { \
		if (debug >= level) \
			printk(KERN_DEBUG "vb2: " fmt, ## arg); \
	} while (0)

/* Invoke an optional mem_ops callback; a missing op evaluates to 0. */
#define call_memop(q, op, args...) \
	(((q)->mem_ops->op) ? \
		((q)->mem_ops->op(args)) : 0)

/* Invoke an optional driver queue-op callback; a missing op evaluates to 0. */
#define call_qop(q, op, args...) \
	(((q)->ops->op) ? ((q)->ops->op(args)) : 0)

/* Buffer flags owned by the vb2 core; cleared before reporting state. */
#define V4L2_BUFFER_STATE_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED)
46
47/**
48 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
49 */
/**
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 *
 * Allocates every plane through the queue's mem_ops->alloc, using the
 * per-plane alloc context and the sizes negotiated in queue_setup.
 * On failure all planes allocated so far are released again.
 * Returns 0 or -ENOMEM.
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;

	/* Allocate memory for all planes in this buffer */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		mem_priv = call_memop(q, alloc, q->alloc_ctx[plane],
				      q->plane_sizes[plane]);
		if (IS_ERR_OR_NULL(mem_priv))
			goto free;

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
		vb->v4l2_planes[plane].length = q->plane_sizes[plane];
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_memop(q, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return -ENOMEM;
}
78
79/**
80 * __vb2_buf_mem_free() - free memory of the given buffer
81 */
82static void __vb2_buf_mem_free(struct vb2_buffer *vb)
83{
84 struct vb2_queue *q = vb->vb2_queue;
85 unsigned int plane;
86
87 for (plane = 0; plane < vb->num_planes; ++plane) {
88 call_memop(q, put, vb->planes[plane].mem_priv);
89 vb->planes[plane].mem_priv = NULL;
90 dprintk(3, "Freed plane %d of buffer %d\n", plane,
91 vb->v4l2_buf.index);
92 }
93}
94
95/**
96 * __vb2_buf_userptr_put() - release userspace memory associated with
97 * a USERPTR buffer
98 */
99static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
100{
101 struct vb2_queue *q = vb->vb2_queue;
102 unsigned int plane;
103
104 for (plane = 0; plane < vb->num_planes; ++plane) {
105 if (vb->planes[plane].mem_priv)
106 call_memop(q, put_userptr, vb->planes[plane].mem_priv);
107 vb->planes[plane].mem_priv = NULL;
108 }
109}
110
111/**
112 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
113 * every buffer on the queue
114 */
/**
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * the @n buffers just appended to the queue
 *
 * Offsets continue page-aligned after the last plane of any buffers
 * already on the queue, so each plane's mem_offset is unique across the
 * whole queue and can be matched back in the mmap handler.
 */
static void __setup_offsets(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	unsigned long off;

	if (q->num_buffers) {
		/* resume after the last plane of the last existing buffer */
		struct v4l2_plane *p;
		vb = q->bufs[q->num_buffers - 1];
		p = &vb->v4l2_planes[vb->num_planes - 1];
		off = PAGE_ALIGN(p->m.mem_offset + p->length);
	} else {
		off = 0;
	}

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane) {
			vb->v4l2_planes[plane].length = q->plane_sizes[plane];
			vb->v4l2_planes[plane].m.mem_offset = off;

			dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
					buffer, plane, off);

			off += vb->v4l2_planes[plane].length;
			off = PAGE_ALIGN(off);
		}
	}
}
147
148/**
149 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
150 * video buffer memory for all buffers/planes on the queue and initializes the
151 * queue
152 *
153 * Returns the number of buffers successfully allocated.
154 */
/**
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initializes the
 * queue
 *
 * Allocation stops at the first failure; already-allocated buffers are
 * kept, so the caller must cope with a partial result.
 *
 * Returns the number of buffers successfully allocated (possibly fewer
 * than @num_buffers, or 0).
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes)
{
	unsigned int buffer;
	struct vb2_buffer *vb;
	int ret;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "Memory alloc for buffer struct failed\n");
			break;
		}

		/* Length stores number of planes for multiplanar buffers */
		if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
			vb->v4l2_buf.length = num_planes;

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->v4l2_buf.index = q->num_buffers + buffer;
		vb->v4l2_buf.type = q->type;
		vb->v4l2_buf.memory = memory;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == V4L2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "Failed allocating memory for "
						"buffer %d\n", buffer);
				kfree(vb);
				break;
			}
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_qop(q, buf_init, vb);
			if (ret) {
				dprintk(1, "Buffer %d %p initialization"
					" failed\n", buffer, vb);
				__vb2_buf_mem_free(vb);
				kfree(vb);
				break;
			}
		}

		q->bufs[q->num_buffers + buffer] = vb;
	}

	/* assign mmap cookies only to the buffers that actually exist */
	__setup_offsets(q, buffer);

	dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
			buffer, num_planes);

	return buffer;
}
215
216/**
217 * __vb2_free_mem() - release all video buffer memory for a given queue
218 */
219static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
220{
221 unsigned int buffer;
222 struct vb2_buffer *vb;
223
224 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
225 ++buffer) {
226 vb = q->bufs[buffer];
227 if (!vb)
228 continue;
229
230 /* Free MMAP buffers or release USERPTR buffers */
231 if (q->memory == V4L2_MEMORY_MMAP)
232 __vb2_buf_mem_free(vb);
233 else
234 __vb2_buf_userptr_put(vb);
235 }
236}
237
238/**
239 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
240 * related information, if no buffers are left return the queue to an
241 * uninitialized state. Might be called even if the queue has already been freed.
242 */
/**
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 *
 * Order matters: driver buf_cleanup callbacks run first, then the
 * backing memory is released, and only then are the vb2_buffer structs
 * freed.  When the last buffer goes away the queue's memory type is
 * reset and the queued list re-initialized.
 */
static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/* Call driver-provided cleanup function for each buffer, if provided */
	if (q->ops->buf_cleanup) {
		for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
		     ++buffer) {
			if (NULL == q->bufs[buffer])
				continue;
			q->ops->buf_cleanup(q->bufs[buffer]);
		}
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers)
		q->memory = 0;
	INIT_LIST_HEAD(&q->queued_list);
}
272
273/**
274 * __verify_planes_array() - verify that the planes array passed in struct
275 * v4l2_buffer from userspace can be safely used
276 */
277static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
278{
279 /* Is memory for copying plane information present? */
280 if (NULL == b->m.planes) {
281 dprintk(1, "Multi-planar buffer passed but "
282 "planes array not provided\n");
283 return -EINVAL;
284 }
285
286 if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
287 dprintk(1, "Incorrect planes array length, "
288 "expected %d, got %d\n", vb->num_planes, b->length);
289 return -EINVAL;
290 }
291
292 return 0;
293}
294
295/**
296 * __buffer_in_use() - return true if the buffer is in use and
297 * the queue cannot be freed (by the means of REQBUFS(0)) call
298 */
299static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
300{
301 unsigned int plane;
302 for (plane = 0; plane < vb->num_planes; ++plane) {
303 void *mem_priv = vb->planes[plane].mem_priv;
304 /*
305 * If num_users() has not been provided, call_memop
306 * will return 0, apparently nobody cares about this
307 * case anyway. If num_users() returns more than 1,
308 * we are not the only user of the plane's memory.
309 */
310 if (mem_priv && call_memop(q, num_users, mem_priv) > 1)
311 return true;
312 }
313 return false;
314}
315
316/**
317 * __buffers_in_use() - return true if any buffers on the queue are in use and
318 * the queue cannot be freed (by the means of REQBUFS(0)) call
319 */
320static bool __buffers_in_use(struct vb2_queue *q)
321{
322 unsigned int buffer;
323 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
324 if (__buffer_in_use(q, q->bufs[buffer]))
325 return true;
326 }
327 return false;
328}
329
330/**
331 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
332 * returned to userspace
333 */
/**
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 *
 * Copies the core fields, the plane data (into the user-supplied planes
 * array for multi-planar buffers, or into the single-plane union
 * otherwise), and translates the internal buffer state into
 * V4L2_BUF_FLAG_* bits.  Returns 0 or a negative errno from the planes
 * array verification.
 */
static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	int ret;

	/* Copy back data such as timestamp, flags, etc. */
	memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
	b->reserved2 = vb->v4l2_buf.reserved2;
	b->reserved = vb->v4l2_buf.reserved;

	if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
		ret = __verify_planes_array(vb, b);
		if (ret)
			return ret;

		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The memory and size is verified above.
		 */
		memcpy(b->m.planes, vb->v4l2_planes,
			b->length * sizeof(struct v4l2_plane));
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->v4l2_planes[0].length;
		b->bytesused = vb->v4l2_planes[0].bytesused;
		if (q->memory == V4L2_MEMORY_MMAP)
			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
		else if (q->memory == V4L2_MEMORY_USERPTR)
			b->m.userptr = vb->v4l2_planes[0].m.userptr;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_STATE_FLAGS;

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARED:
		b->flags |= V4L2_BUF_FLAG_PREPARED;
		break;
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	/* report MAPPED while any other user still holds the planes */
	if (__buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	return 0;
}
397
398/**
399 * vb2_querybuf() - query video buffer information
400 * @q: videobuf queue
401 * @b: buffer struct passed from userspace to vidioc_querybuf handler
402 * in driver
403 *
404 * Should be called from vidioc_querybuf ioctl handler in driver.
405 * This function will verify the passed v4l2_buffer structure and fill the
406 * relevant information for the userspace.
407 *
408 * The return values from this function are intended to be directly returned
409 * from vidioc_querybuf handler in driver.
410 */
411int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
412{
413 struct vb2_buffer *vb;
414
415 if (b->type != q->type) {
416 dprintk(1, "querybuf: wrong buffer type\n");
417 return -EINVAL;
418 }
419
420 if (b->index >= q->num_buffers) {
421 dprintk(1, "querybuf: buffer index out of range\n");
422 return -EINVAL;
423 }
424 vb = q->bufs[b->index];
425
426 return __fill_v4l2_buffer(vb, b);
427}
428EXPORT_SYMBOL(vb2_querybuf);
429
430/**
431 * __verify_userptr_ops() - verify that all memory operations required for
432 * USERPTR queue type have been provided
433 */
434static int __verify_userptr_ops(struct vb2_queue *q)
435{
436 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
437 !q->mem_ops->put_userptr)
438 return -EINVAL;
439
440 return 0;
441}
442
443/**
444 * __verify_mmap_ops() - verify that all memory operations required for
445 * MMAP queue type have been provided
446 */
447static int __verify_mmap_ops(struct vb2_queue *q)
448{
449 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
450 !q->mem_ops->put || !q->mem_ops->mmap)
451 return -EINVAL;
452
453 return 0;
454}
455
456/**
457 * __verify_memory_type() - Check whether the memory type and buffer type
458 * passed to a buffer operation are compatible with the queue.
459 */
460static int __verify_memory_type(struct vb2_queue *q,
461 enum v4l2_memory memory, enum v4l2_buf_type type)
462{
463 if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR) {
464 dprintk(1, "reqbufs: unsupported memory type\n");
465 return -EINVAL;
466 }
467
468 if (type != q->type) {
469 dprintk(1, "reqbufs: requested type is incorrect\n");
470 return -EINVAL;
471 }
472
473 /*
474 * Make sure all the required memory ops for given memory type
475 * are available.
476 */
477 if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
478 dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
479 return -EINVAL;
480 }
481
482 if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
483 dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
484 return -EINVAL;
485 }
486
487 /*
488 * Place the busy tests at the end: -EBUSY can be ignored when
489 * create_bufs is called with count == 0, but count == 0 should still
490 * do the memory and type validation.
491 */
492 if (q->fileio) {
493 dprintk(1, "reqbufs: file io in progress\n");
494 return -EBUSY;
495 }
496 return 0;
497}
498
/**
 * __reqbufs() - Initiate streaming
 * @q:		videobuf2 queue
 * @req:	struct passed from userspace to vidioc_reqbufs handler in driver
 *
 * Should be called from vidioc_reqbufs ioctl handler of a driver.
 * This function:
 * 1) verifies streaming parameters passed from the userspace,
 * 2) sets up the queue,
 * 3) negotiates number of buffers and planes per buffer with the driver
 *    to be used during streaming,
 * 4) allocates internal buffer structures (struct vb2_buffer), according to
 *    the agreed parameters,
 * 5) for MMAP memory type, allocates actual video memory, using the
 *    memory handling/allocation routines provided during queue initialization
 *
 * If req->count is 0, all the memory will be freed instead.
 * If the queue has been allocated previously (by a previous vb2_reqbufs) call
 * and the queue is not busy, memory will be reallocated.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_reqbufs handler in driver.
 */
static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	int ret;

	/* Buffers cannot be (re)allocated while streaming is in progress. */
	if (q->streaming) {
		dprintk(1, "reqbufs: streaming active\n");
		return -EBUSY;
	}

	if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
			dprintk(1, "reqbufs: memory in use, cannot free\n");
			return -EBUSY;
		}

		__vb2_queue_free(q, q->num_buffers);

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (req->count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
	memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
	q->memory = req->memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
		       q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/*
	 * Finally, allocate buffers and video memory.
	 * __vb2_queue_alloc() returns the number of buffers it allocated;
	 * zero means the allocation failed entirely.
	 */
	ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
	if (ret == 0) {
		dprintk(1, "Memory allocation failed\n");
		return -ENOMEM;
	}

	allocated_buffers = ret;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * Re-run queue_setup() so the driver can adapt to (or
		 * reject) the smaller number of buffers actually allocated.
		 */
		ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		__vb2_queue_free(q, allocated_buffers);
		return ret;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	req->count = allocated_buffers;

	return 0;
}
611
612/**
613 * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
614 * type values.
615 * @q: videobuf2 queue
616 * @req: struct passed from userspace to vidioc_reqbufs handler in driver
617 */
618int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
619{
620 int ret = __verify_memory_type(q, req->memory, req->type);
621
622 return ret ? ret : __reqbufs(q, req);
623}
624EXPORT_SYMBOL_GPL(vb2_reqbufs);
625
626/**
627 * __create_bufs() - Allocate buffers and any required auxiliary structs
628 * @q: videobuf2 queue
629 * @create: creation parameters, passed from userspace to vidioc_create_bufs
630 * handler in driver
631 *
632 * Should be called from vidioc_create_bufs ioctl handler of a driver.
633 * This function:
634 * 1) verifies parameter sanity
635 * 2) calls the .queue_setup() queue operation
636 * 3) performs any necessary memory allocations
637 *
638 * The return values from this function are intended to be directly returned
639 * from vidioc_create_bufs handler in driver.
640 */
641static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
642{
643 unsigned int num_planes = 0, num_buffers, allocated_buffers;
644 int ret;
645
646 if (q->num_buffers == VIDEO_MAX_FRAME) {
647 dprintk(1, "%s(): maximum number of buffers already allocated\n",
648 __func__);
649 return -ENOBUFS;
650 }
651
652 if (!q->num_buffers) {
653 memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
654 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
655 q->memory = create->memory;
656 }
657
658 num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
659
660 /*
661 * Ask the driver, whether the requested number of buffers, planes per
662 * buffer and their sizes are acceptable
663 */
664 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
665 &num_planes, q->plane_sizes, q->alloc_ctx);
666 if (ret)
667 return ret;
668
669 /* Finally, allocate buffers and video memory */
670 ret = __vb2_queue_alloc(q, create->memory, num_buffers,
671 num_planes);
672 if (ret == 0) {
673 dprintk(1, "Memory allocation failed\n");
674 return -ENOMEM;
675 }
676
677 allocated_buffers = ret;
678
679 /*
680 * Check if driver can handle the so far allocated number of buffers.
681 */
682 if (ret < num_buffers) {
683 num_buffers = ret;
684
685 /*
686 * q->num_buffers contains the total number of buffers, that the
687 * queue driver has set up
688 */
689 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
690 &num_planes, q->plane_sizes, q->alloc_ctx);
691
692 if (!ret && allocated_buffers < num_buffers)
693 ret = -ENOMEM;
694
695 /*
696 * Either the driver has accepted a smaller number of buffers,
697 * or .queue_setup() returned an error
698 */
699 }
700
701 q->num_buffers += allocated_buffers;
702
703 if (ret < 0) {
704 __vb2_queue_free(q, allocated_buffers);
705 return -ENOMEM;
706 }
707
708 /*
709 * Return the number of successfully allocated buffers
710 * to the userspace.
711 */
712 create->count = allocated_buffers;
713
714 return 0;
715}
716
717/**
718 * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
719 * memory and type values.
720 * @q: videobuf2 queue
721 * @create: creation parameters, passed from userspace to vidioc_create_bufs
722 * handler in driver
723 */
724int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
725{
726 int ret = __verify_memory_type(q, create->memory, create->format.type);
727
728 create->index = q->num_buffers;
729 if (create->count == 0)
730 return ret != -EBUSY ? ret : 0;
731 return ret ? ret : __create_bufs(q, create);
732}
733EXPORT_SYMBOL_GPL(vb2_create_bufs);
734
735/**
736 * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
737 * @vb: vb2_buffer to which the plane in question belongs to
738 * @plane_no: plane number for which the address is to be returned
739 *
740 * This function returns a kernel virtual address of a given plane if
741 * such a mapping exist, NULL otherwise.
742 */
743void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
744{
745 struct vb2_queue *q = vb->vb2_queue;
746
747 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
748 return NULL;
749
750 return call_memop(q, vaddr, vb->planes[plane_no].mem_priv);
751
752}
753EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
754
755/**
756 * vb2_plane_cookie() - Return allocator specific cookie for the given plane
757 * @vb: vb2_buffer to which the plane in question belongs to
758 * @plane_no: plane number for which the cookie is to be returned
759 *
760 * This function returns an allocator specific cookie for a given plane if
761 * available, NULL otherwise. The allocator should provide some simple static
762 * inline function, which would convert this cookie to the allocator specific
763 * type that can be used directly by the driver to access the buffer. This can
764 * be for example physical address, pointer to scatter list or IOMMU mapping.
765 */
766void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
767{
768 struct vb2_queue *q = vb->vb2_queue;
769
770 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
771 return NULL;
772
773 return call_memop(q, cookie, vb->planes[plane_no].mem_priv);
774}
775EXPORT_SYMBOL_GPL(vb2_plane_cookie);
776
/**
 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
 * @vb:		vb2_buffer returned from the driver
 * @state:	either VB2_BUF_STATE_DONE if the operation finished successfully
 *		or VB2_BUF_STATE_ERROR if the operation finished with an error
 *
 * This function should be called by the driver after a hardware operation on
 * a buffer is finished and the buffer may be returned to userspace. The driver
 * cannot use this buffer anymore until it is queued back to it by videobuf
 * by the means of buf_queue callback. Only buffers previously queued to the
 * driver by buf_queue can be passed to this function.
 */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;

	/* Only buffers currently owned by the driver (ACTIVE) may finish. */
	if (vb->state != VB2_BUF_STATE_ACTIVE)
		return;

	/* The driver may only report success (DONE) or failure (ERROR). */
	if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
		return;

	dprintk(4, "Done processing on buffer %d, state: %d\n",
			vb->v4l2_buf.index, vb->state);

	/*
	 * Add the buffer to the done buffers list.  State change, list
	 * insertion and queued_count update are done atomically under
	 * done_lock so dqbuf/waiters see a consistent view.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	vb->state = state;
	list_add_tail(&vb->done_entry, &q->done_list);
	atomic_dec(&q->queued_count);
	spin_unlock_irqrestore(&q->done_lock, flags);

	/* Inform any processes that may be waiting for buffers */
	wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
814
/**
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in
 * a v4l2_buffer by the userspace
 *
 * Plane information is written into @v4l2_planes; the caller chooses whether
 * that is a temporary array (__qbuf_userptr) or vb->v4l2_planes directly
 * (__qbuf_mmap).
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
				struct v4l2_plane *v4l2_planes)
{
	unsigned int plane;
	int ret;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		/*
		 * Verify that the userspace gave us a valid array for
		 * plane information.
		 */
		ret = __verify_planes_array(vb, b);
		if (ret)
			return ret;

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].bytesused =
					b->m.planes[plane].bytesused;
				v4l2_planes[plane].data_offset =
					b->m.planes[plane].data_offset;
			}
		}

		if (b->memory == V4L2_MEMORY_USERPTR) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 */
		if (V4L2_TYPE_IS_OUTPUT(b->type))
			v4l2_planes[0].bytesused = b->bytesused;

		if (b->memory == V4L2_MEMORY_USERPTR) {
			v4l2_planes[0].m.userptr = b->m.userptr;
			v4l2_planes[0].length = b->length;
		}
	}

	/*
	 * Copy userspace-controlled fields; the state-related flags are
	 * managed by vb2 itself and are masked out here.
	 */
	vb->v4l2_buf.field = b->field;
	vb->v4l2_buf.timestamp = b->timestamp;
	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;

	return 0;
}
878
/**
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
 *
 * Re-acquires the userspace memory for every plane whose address or length
 * changed since the last qbuf; on any failure all planes acquired so far are
 * released again.
 */
static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	/* the allocator gets write access for non-OUTPUT (capture) queues */
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);

	/* Verify and copy relevant information provided by the userspace */
	ret = __fill_vb2_buffer(vb, b, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->v4l2_planes[plane].m.userptr &&
		    vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->v4l2_planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "qbuf: userspace address for plane %d changed, "
				"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < q->plane_sizes[plane]) {
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv)
			call_memop(q, put_userptr, vb->planes[plane].mem_priv);

		/* Clear stale bookkeeping before trying to re-acquire. */
		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;

		/* Acquire each plane's memory */
		mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane],
				      planes[plane].m.userptr,
				      planes[plane].length, write);
		if (IS_ERR_OR_NULL(mem_priv)) {
			dprintk(1, "qbuf: failed acquiring userspace "
						"memory for plane %d\n", plane);
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Call driver-specific initialization on the newly acquired buffer,
	 * if provided.
	 */
	ret = call_qop(q, buf_init, vb);
	if (ret) {
		dprintk(1, "qbuf: buffer initialization failed\n");
		goto err;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_memop(q, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;
	}

	return ret;
}
963
/**
 * __qbuf_mmap() - handle qbuf of an MMAP buffer
 *
 * MMAP plane memory is owned by vb2 itself, so only the userspace-provided
 * per-buffer information needs to be copied into vb->v4l2_planes.
 */
static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	return __fill_vb2_buffer(vb, b, vb->v4l2_planes);
}
971
/**
 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
 */
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;

	/* Mark ACTIVE before handing over: the driver owns the buffer now. */
	vb->state = VB2_BUF_STATE_ACTIVE;
	/* Balanced by the atomic_dec() in vb2_buffer_done(). */
	atomic_inc(&q->queued_count);
	q->ops->buf_queue(vb);
}
983
984static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
985{
986 struct vb2_queue *q = vb->vb2_queue;
987 int ret;
988
989 switch (q->memory) {
990 case V4L2_MEMORY_MMAP:
991 ret = __qbuf_mmap(vb, b);
992 break;
993 case V4L2_MEMORY_USERPTR:
994 ret = __qbuf_userptr(vb, b);
995 break;
996 default:
997 WARN(1, "Invalid queue type\n");
998 ret = -EINVAL;
999 }
1000
1001 if (!ret)
1002 ret = call_qop(q, buf_prepare, vb);
1003 if (ret)
1004 dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
1005 else
1006 vb->state = VB2_BUF_STATE_PREPARED;
1007
1008 return ret;
1009}
1010
1011/**
1012 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
1013 * @q: videobuf2 queue
1014 * @b: buffer structure passed from userspace to vidioc_prepare_buf
1015 * handler in driver
1016 *
1017 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
1018 * This function:
1019 * 1) verifies the passed buffer,
1020 * 2) calls buf_prepare callback in the driver (if provided), in which
1021 * driver-specific buffer initialization can be performed,
1022 *
1023 * The return values from this function are intended to be directly returned
1024 * from vidioc_prepare_buf handler in driver.
1025 */
1026int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
1027{
1028 struct vb2_buffer *vb;
1029 int ret;
1030
1031 if (q->fileio) {
1032 dprintk(1, "%s(): file io in progress\n", __func__);
1033 return -EBUSY;
1034 }
1035
1036 if (b->type != q->type) {
1037 dprintk(1, "%s(): invalid buffer type\n", __func__);
1038 return -EINVAL;
1039 }
1040
1041 if (b->index >= q->num_buffers) {
1042 dprintk(1, "%s(): buffer index out of range\n", __func__);
1043 return -EINVAL;
1044 }
1045
1046 vb = q->bufs[b->index];
1047 if (NULL == vb) {
1048 /* Should never happen */
1049 dprintk(1, "%s(): buffer is NULL\n", __func__);
1050 return -EINVAL;
1051 }
1052
1053 if (b->memory != q->memory) {
1054 dprintk(1, "%s(): invalid memory type\n", __func__);
1055 return -EINVAL;
1056 }
1057
1058 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1059 dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state);
1060 return -EINVAL;
1061 }
1062
1063 ret = __buf_prepare(vb, b);
1064 if (ret < 0)
1065 return ret;
1066
1067 __fill_v4l2_buffer(vb, b);
1068
1069 return 0;
1070}
1071EXPORT_SYMBOL_GPL(vb2_prepare_buf);
1072
/**
 * vb2_qbuf() - Queue a buffer from userspace
 * @q:		videobuf2 queue
 * @b:		buffer structure passed from userspace to vidioc_qbuf handler
 *		in driver
 *
 * Should be called from vidioc_qbuf ioctl handler of a driver.
 * This function:
 * 1) verifies the passed buffer,
 * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
 *    which driver-specific buffer initialization can be performed,
 * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
 *    callback for processing.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_qbuf handler in driver.
 */
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct rw_semaphore *mmap_sem = NULL;
	struct vb2_buffer *vb;
	int ret = 0;

	/*
	 * In case of user pointer buffers vb2 allocator needs to get direct
	 * access to userspace pages. This requires getting read access on
	 * mmap semaphore in the current process structure. The same
	 * semaphore is taken before calling mmap operation, while both mmap
	 * and qbuf are called by the driver or v4l2 core with driver's lock
	 * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in
	 * mmap and driver's lock then mmap_sem in qbuf) the videobuf2 core
	 * releases driver's lock, takes mmap_sem and then takes the driver's
	 * lock again.
	 *
	 * To avoid race with other vb2 calls, which might be called after
	 * releasing driver's lock, this operation is performed at the
	 * beginning of qbuf processing. This way the queue status is
	 * consistent after getting driver's lock back.
	 */
	if (q->memory == V4L2_MEMORY_USERPTR) {
		mmap_sem = &current->mm->mmap_sem;
		call_qop(q, wait_prepare, q);
		down_read(mmap_sem);
		call_qop(q, wait_finish, q);
	}

	if (q->fileio) {
		dprintk(1, "qbuf: file io in progress\n");
		ret = -EBUSY;
		goto unlock;
	}

	if (b->type != q->type) {
		dprintk(1, "qbuf: invalid buffer type\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "qbuf: buffer index out of range\n");
		ret = -EINVAL;
		goto unlock;
	}

	vb = q->bufs[b->index];
	if (NULL == vb) {
		/* Should never happen */
		dprintk(1, "qbuf: buffer is NULL\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (b->memory != q->memory) {
		dprintk(1, "qbuf: invalid memory type\n");
		ret = -EINVAL;
		goto unlock;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
		/* Buffer not prepared yet (no prior VIDIOC_PREPARE_BUF). */
		ret = __buf_prepare(vb, b);
		if (ret)
			goto unlock;
		/* fall through */
	case VB2_BUF_STATE_PREPARED:
		break;
	default:
		dprintk(1, "qbuf: buffer already in use\n");
		ret = -EINVAL;
		goto unlock;
	}

	/*
	 * Add to the queued buffers list, a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
	vb->state = VB2_BUF_STATE_QUEUED;

	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
	if (q->streaming)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);

	dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
unlock:
	if (mmap_sem)
		up_read(mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
1188
/**
 * __vb2_wait_for_done_vb() - wait for a buffer to become available
 * for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain not
	 * empty if list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		if (!q->streaming) {
			dprintk(1, "Streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(1, "Nonblocking and no buffers to dequeue, "
								"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 * The wait is interruptible, so ret may be -ERESTARTSYS.
		 */
		dprintk(3, "Will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_qop(q, wait_finish, q);
		if (ret)
			return ret;
	}
	return 0;
}
1251
1252/**
1253 * __vb2_get_done_vb() - get a buffer ready for dequeuing
1254 *
1255 * Will sleep if required for nonblocking == false.
1256 */
1257static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1258 int nonblocking)
1259{
1260 unsigned long flags;
1261 int ret;
1262
1263 /*
1264 * Wait for at least one buffer to become available on the done_list.
1265 */
1266 ret = __vb2_wait_for_done_vb(q, nonblocking);
1267 if (ret)
1268 return ret;
1269
1270 /*
1271 * Driver's lock has been held since we last verified that done_list
1272 * is not empty, so no need for another list_empty(done_list) check.
1273 */
1274 spin_lock_irqsave(&q->done_lock, flags);
1275 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
1276 list_del(&(*vb)->done_entry);
1277 spin_unlock_irqrestore(&q->done_lock, flags);
1278
1279 return 0;
1280}
1281
1282/**
1283 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
1284 * @q: videobuf2 queue
1285 *
1286 * This function will wait until all buffers that have been given to the driver
1287 * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
1288 * wait_prepare, wait_finish pair. It is intended to be called with all locks
1289 * taken, for example from stop_streaming() callback.
1290 */
1291int vb2_wait_for_all_buffers(struct vb2_queue *q)
1292{
1293 if (!q->streaming) {
1294 dprintk(1, "Streaming off, will not wait for buffers\n");
1295 return -EINVAL;
1296 }
1297
1298 wait_event(q->done_wq, !atomic_read(&q->queued_count));
1299 return 0;
1300}
1301EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
1302
/**
 * vb2_dqbuf() - Dequeue a buffer to the userspace
 * @q:		videobuf2 queue
 * @b:		buffer structure passed from userspace to vidioc_dqbuf handler
 *		in driver
 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
 *		 buffers ready for dequeuing are present. Normally the driver
 *		 would be passing (file->f_flags & O_NONBLOCK) here
 *
 * Should be called from vidioc_dqbuf ioctl handler of a driver.
 * This function:
 * 1) verifies the passed buffer,
 * 2) calls buf_finish callback in the driver (if provided), in which
 *    driver can perform any additional operations that may be required before
 *    returning the buffer to userspace, such as cache sync,
 * 3) the buffer struct members are filled with relevant information for
 *    the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_dqbuf handler in driver.
 */
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	if (q->fileio) {
		dprintk(1, "dqbuf: file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(1, "dqbuf: invalid buffer type\n");
		return -EINVAL;
	}

	/* May sleep (unless nonblocking); removes vb from the done list. */
	ret = __vb2_get_done_vb(q, &vb, nonblocking);
	if (ret < 0) {
		dprintk(1, "dqbuf: error getting next done buffer\n");
		return ret;
	}

	/*
	 * NOTE(review): on buf_finish failure the buffer has already been
	 * removed from the done list and stays in DONE/ERROR state — confirm
	 * drivers expect this.
	 */
	ret = call_qop(q, buf_finish, vb);
	if (ret) {
		dprintk(1, "dqbuf: buffer finish failed\n");
		return ret;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "dqbuf: Returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "dqbuf: Returning done buffer with errors\n");
		break;
	default:
		dprintk(1, "dqbuf: Invalid buffer state\n");
		return -EINVAL;
	}

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);
	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);

	dprintk(1, "dqbuf of buffer %d, with state %d\n",
			vb->v4l2_buf.index, vb->state);

	/* Ownership of the buffer returns to userspace. */
	vb->state = VB2_BUF_STATE_DEQUEUED;
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
1375
1376/**
1377 * __vb2_queue_cancel() - cancel and stop (pause) streaming
1378 *
1379 * Removes all queued buffers from driver's queue and all buffers queued by
1380 * userspace from videobuf's queue. Returns to state after reqbufs.
1381 */
1382static void __vb2_queue_cancel(struct vb2_queue *q)
1383{
1384 unsigned int i;
1385
1386 /*
1387 * Tell driver to stop all transactions and release all queued
1388 * buffers.
1389 */
1390 if (q->streaming)
1391 call_qop(q, stop_streaming, q);
1392 q->streaming = 0;
1393
1394 /*
1395 * Remove all buffers from videobuf's list...
1396 */
1397 INIT_LIST_HEAD(&q->queued_list);
1398 /*
1399 * ...and done list; userspace will not receive any buffers it
1400 * has not already dequeued before initiating cancel.
1401 */
1402 INIT_LIST_HEAD(&q->done_list);
1403 atomic_set(&q->queued_count, 0);
1404 wake_up_all(&q->done_wq);
1405
1406 /*
1407 * Reinitialize all buffers for next use.
1408 */
1409 for (i = 0; i < q->num_buffers; ++i)
1410 q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
1411}
1412
1413/**
1414 * vb2_streamon - start streaming
1415 * @q: videobuf2 queue
1416 * @type: type argument passed from userspace to vidioc_streamon handler
1417 *
1418 * Should be called from vidioc_streamon handler of a driver.
1419 * This function:
1420 * 1) verifies current state
1421 * 2) passes any previously queued buffers to the driver and starts streaming
1422 *
1423 * The return values from this function are intended to be directly returned
1424 * from vidioc_streamon handler in the driver.
1425 */
1426int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
1427{
1428 struct vb2_buffer *vb;
1429 int ret;
1430
1431 if (q->fileio) {
1432 dprintk(1, "streamon: file io in progress\n");
1433 return -EBUSY;
1434 }
1435
1436 if (type != q->type) {
1437 dprintk(1, "streamon: invalid stream type\n");
1438 return -EINVAL;
1439 }
1440
1441 if (q->streaming) {
1442 dprintk(1, "streamon: already streaming\n");
1443 return -EBUSY;
1444 }
1445
1446 /*
1447 * If any buffers were queued before streamon,
1448 * we can now pass them to driver for processing.
1449 */
1450 list_for_each_entry(vb, &q->queued_list, queued_entry)
1451 __enqueue_in_driver(vb);
1452
1453 /*
1454 * Let driver notice that streaming state has been enabled.
1455 */
1456 ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
1457 if (ret) {
1458 dprintk(1, "streamon: driver refused to start streaming\n");
1459 __vb2_queue_cancel(q);
1460 return ret;
1461 }
1462
1463 q->streaming = 1;
1464
1465 dprintk(3, "Streamon successful\n");
1466 return 0;
1467}
1468EXPORT_SYMBOL_GPL(vb2_streamon);
1469
1470
1471/**
1472 * vb2_streamoff - stop streaming
1473 * @q: videobuf2 queue
1474 * @type: type argument passed from userspace to vidioc_streamoff handler
1475 *
1476 * Should be called from vidioc_streamoff handler of a driver.
1477 * This function:
1478 * 1) verifies current state,
1479 * 2) stop streaming and dequeues any queued buffers, including those previously
1480 * passed to the driver (after waiting for the driver to finish).
1481 *
1482 * This call can be used for pausing playback.
1483 * The return values from this function are intended to be directly returned
1484 * from vidioc_streamoff handler in the driver
1485 */
1486int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
1487{
1488 if (q->fileio) {
1489 dprintk(1, "streamoff: file io in progress\n");
1490 return -EBUSY;
1491 }
1492
1493 if (type != q->type) {
1494 dprintk(1, "streamoff: invalid stream type\n");
1495 return -EINVAL;
1496 }
1497
1498 if (!q->streaming) {
1499 dprintk(1, "streamoff: not streaming\n");
1500 return -EINVAL;
1501 }
1502
1503 /*
1504 * Cancel will pause streaming and remove all buffers from the driver
1505 * and videobuf, effectively returning control over them to userspace.
1506 */
1507 __vb2_queue_cancel(q);
1508
1509 dprintk(3, "Streamoff successful\n");
1510 return 0;
1511}
1512EXPORT_SYMBOL_GPL(vb2_streamoff);
1513
1514/**
1515 * __find_plane_by_offset() - find plane associated with the given offset off
1516 */
1517static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
1518 unsigned int *_buffer, unsigned int *_plane)
1519{
1520 struct vb2_buffer *vb;
1521 unsigned int buffer, plane;
1522
1523 /*
1524 * Go over all buffers and their planes, comparing the given offset
1525 * with an offset assigned to each plane. If a match is found,
1526 * return its buffer and plane numbers.
1527 */
1528 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
1529 vb = q->bufs[buffer];
1530
1531 for (plane = 0; plane < vb->num_planes; ++plane) {
1532 if (vb->v4l2_planes[plane].m.mem_offset == off) {
1533 *_buffer = buffer;
1534 *_plane = plane;
1535 return 0;
1536 }
1537 }
1538 }
1539
1540 return -EINVAL;
1541}
1542
1543/**
1544 * vb2_mmap() - map video buffers into application address space
1545 * @q: videobuf2 queue
1546 * @vma: vma passed to the mmap file operation handler in the driver
1547 *
1548 * Should be called from mmap file operation handler of a driver.
1549 * This function maps one plane of one of the available video buffers to
1550 * userspace. To map whole video memory allocated on reqbufs, this function
1551 * has to be called once per each plane per each buffer previously allocated.
1552 *
1553 * When the userspace application calls mmap, it passes to it an offset returned
1554 * to it earlier by the means of vidioc_querybuf handler. That offset acts as
1555 * a "cookie", which is then used to identify the plane to be mapped.
1556 * This function finds a plane with a matching offset and a mapping is performed
1557 * by the means of a provided memory operation.
1558 *
1559 * The return values from this function are intended to be directly returned
1560 * from the mmap handler in driver.
1561 */
1562int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1563{
1564 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
1565 struct vb2_buffer *vb;
1566 unsigned int buffer, plane;
1567 int ret;
1568
1569 if (q->memory != V4L2_MEMORY_MMAP) {
1570 dprintk(1, "Queue is not currently set up for mmap\n");
1571 return -EINVAL;
1572 }
1573
1574 /*
1575 * Check memory area access mode.
1576 */
1577 if (!(vma->vm_flags & VM_SHARED)) {
1578 dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
1579 return -EINVAL;
1580 }
1581 if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1582 if (!(vma->vm_flags & VM_WRITE)) {
1583 dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
1584 return -EINVAL;
1585 }
1586 } else {
1587 if (!(vma->vm_flags & VM_READ)) {
1588 dprintk(1, "Invalid vma flags, VM_READ needed\n");
1589 return -EINVAL;
1590 }
1591 }
1592
1593 /*
1594 * Find the plane corresponding to the offset passed by userspace.
1595 */
1596 ret = __find_plane_by_offset(q, off, &buffer, &plane);
1597 if (ret)
1598 return ret;
1599
1600 vb = q->bufs[buffer];
1601
1602 ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
1603 if (ret)
1604 return ret;
1605
1606 dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
1607 return 0;
1608}
1609EXPORT_SYMBOL_GPL(vb2_mmap);
1610
#ifndef CONFIG_MMU
/*
 * No-MMU replacement for the mmap offset lookup: translate the mmap "cookie"
 * in @pgoff to the kernel virtual address of the matching plane, so that
 * userspace can access the buffer directly.
 *
 * Returns the plane's kernel virtual address on success; on failure a
 * negative errno is returned cast to unsigned long, per the kernel's
 * get_unmapped_area convention.
 */
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	int ret;

	/* Only MMAP buffers are allocated by vb2 and thus addressable here. */
	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	return (unsigned long)vb2_plane_vaddr(vb, plane);
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif
1641
1642static int __vb2_init_fileio(struct vb2_queue *q, int read);
1643static int __vb2_cleanup_fileio(struct vb2_queue *q);
1644
/**
 * vb2_poll() - implements poll userspace operation
 * @q:		videobuf2 queue
 * @file:	file argument passed to the poll file operation handler
 * @wait:	wait argument passed to the poll file operation handler
 *
 * This function implements poll file operation handler for a driver.
 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
 * be informed that the file descriptor of a video device is available for
 * reading.
 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
 * will be reported as available for writing.
 *
 * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
 * pending events.
 *
 * The return values from this function are intended to be directly returned
 * from poll handler in driver.
 */
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned int res = 0;
	unsigned long flags;

	/* Report/await exceptional conditions (events) for v4l2_fh users. */
	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			res = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
	}

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && q->fileio == NULL) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return res | POLLERR;
		}
		if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return res | POLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return res | POLLOUT | POLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if no buffers have already been queued.
	 */
	if (list_empty(&q->queued_list))
		return res | POLLERR;

	poll_wait(file, &q->done_wq, wait);

	/*
	 * Take first buffer available for dequeuing.
	 * done_lock protects done_list against concurrent completion from
	 * (possibly) interrupt context.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	/* A DONE or ERROR buffer is dequeueable, so the fd is "ready". */
	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
				res | POLLOUT | POLLWRNORM :
				res | POLLIN | POLLRDNORM;
	}
	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);
1727
1728/**
1729 * vb2_queue_init() - initialize a videobuf2 queue
1730 * @q: videobuf2 queue; this structure should be allocated in driver
1731 *
1732 * The vb2_queue structure should be allocated by the driver. The driver is
1733 * responsible of clearing it's content and setting initial values for some
1734 * required entries before calling this function.
1735 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
1736 * to the struct vb2_queue description in include/media/videobuf2-core.h
1737 * for more information.
1738 */
1739int vb2_queue_init(struct vb2_queue *q)
1740{
1741 BUG_ON(!q);
1742 BUG_ON(!q->ops);
1743 BUG_ON(!q->mem_ops);
1744 BUG_ON(!q->type);
1745 BUG_ON(!q->io_modes);
1746
1747 BUG_ON(!q->ops->queue_setup);
1748 BUG_ON(!q->ops->buf_queue);
1749
1750 INIT_LIST_HEAD(&q->queued_list);
1751 INIT_LIST_HEAD(&q->done_list);
1752 spin_lock_init(&q->done_lock);
1753 init_waitqueue_head(&q->done_wq);
1754
1755 if (q->buf_struct_size == 0)
1756 q->buf_struct_size = sizeof(struct vb2_buffer);
1757
1758 return 0;
1759}
1760EXPORT_SYMBOL_GPL(vb2_queue_init);
1761
/**
 * vb2_queue_release() - stop streaming, release the queue and free memory
 * @q:		videobuf2 queue
 *
 * This function stops streaming and performs necessary clean ups, including
 * freeing video buffer memory. The driver is responsible for freeing
 * the vb2_queue structure itself.
 */
void vb2_queue_release(struct vb2_queue *q)
{
	/* Tear down in dependency order: emulator, then streaming, then
	 * the buffers themselves. */
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	__vb2_queue_free(q, q->num_buffers);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
1777
/**
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;		/* kernel mapping of the buffer's single plane */
	unsigned int size;	/* payload size of the plane in bytes */
	unsigned int pos;	/* current read/write offset within the plane */
	unsigned int queued:1;	/* set while the buffer is queued to the driver */
};
1791
/**
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. For proper operation it required
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	struct v4l2_requestbuffers req;	/* scratch REQBUFS argument */
	struct v4l2_buffer b;		/* scratch QBUF/DQBUF argument */
	struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME]; /* per-buffer emulator state */
	unsigned int index;		/* buffer to be used by the next read/write */
	unsigned int q_count;		/* number of buffers queued so far */
	unsigned int dq_count;		/* number of buffers dequeued so far */
	unsigned int flags;		/* copy of q->io_flags (VB2_FILEIO_*) */
};
1809
1810/**
1811 * __vb2_init_fileio() - initialize file io emulator
1812 * @q: videobuf2 queue
1813 * @read: mode selector (1 means read, 0 means write)
1814 */
1815static int __vb2_init_fileio(struct vb2_queue *q, int read)
1816{
1817 struct vb2_fileio_data *fileio;
1818 int i, ret;
1819 unsigned int count = 0;
1820
1821 /*
1822 * Sanity check
1823 */
1824 if ((read && !(q->io_modes & VB2_READ)) ||
1825 (!read && !(q->io_modes & VB2_WRITE)))
1826 BUG();
1827
1828 /*
1829 * Check if device supports mapping buffers to kernel virtual space.
1830 */
1831 if (!q->mem_ops->vaddr)
1832 return -EBUSY;
1833
1834 /*
1835 * Check if streaming api has not been already activated.
1836 */
1837 if (q->streaming || q->num_buffers > 0)
1838 return -EBUSY;
1839
1840 /*
1841 * Start with count 1, driver can increase it in queue_setup()
1842 */
1843 count = 1;
1844
1845 dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
1846 (read) ? "read" : "write", count, q->io_flags);
1847
1848 fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
1849 if (fileio == NULL)
1850 return -ENOMEM;
1851
1852 fileio->flags = q->io_flags;
1853
1854 /*
1855 * Request buffers and use MMAP type to force driver
1856 * to allocate buffers by itself.
1857 */
1858 fileio->req.count = count;
1859 fileio->req.memory = V4L2_MEMORY_MMAP;
1860 fileio->req.type = q->type;
1861 ret = vb2_reqbufs(q, &fileio->req);
1862 if (ret)
1863 goto err_kfree;
1864
1865 /*
1866 * Check if plane_count is correct
1867 * (multiplane buffers are not supported).
1868 */
1869 if (q->bufs[0]->num_planes != 1) {
1870 ret = -EBUSY;
1871 goto err_reqbufs;
1872 }
1873
1874 /*
1875 * Get kernel address of each buffer.
1876 */
1877 for (i = 0; i < q->num_buffers; i++) {
1878 fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
1879 if (fileio->bufs[i].vaddr == NULL)
1880 goto err_reqbufs;
1881 fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
1882 }
1883
1884 /*
1885 * Read mode requires pre queuing of all buffers.
1886 */
1887 if (read) {
1888 /*
1889 * Queue all buffers.
1890 */
1891 for (i = 0; i < q->num_buffers; i++) {
1892 struct v4l2_buffer *b = &fileio->b;
1893 memset(b, 0, sizeof(*b));
1894 b->type = q->type;
1895 b->memory = q->memory;
1896 b->index = i;
1897 ret = vb2_qbuf(q, b);
1898 if (ret)
1899 goto err_reqbufs;
1900 fileio->bufs[i].queued = 1;
1901 }
1902
1903 /*
1904 * Start streaming.
1905 */
1906 ret = vb2_streamon(q, q->type);
1907 if (ret)
1908 goto err_reqbufs;
1909 }
1910
1911 q->fileio = fileio;
1912
1913 return ret;
1914
1915err_reqbufs:
1916 fileio->req.count = 0;
1917 vb2_reqbufs(q, &fileio->req);
1918
1919err_kfree:
1920 kfree(fileio);
1921 return ret;
1922}
1923
1924/**
1925 * __vb2_cleanup_fileio() - free resourced used by file io emulator
1926 * @q: videobuf2 queue
1927 */
1928static int __vb2_cleanup_fileio(struct vb2_queue *q)
1929{
1930 struct vb2_fileio_data *fileio = q->fileio;
1931
1932 if (fileio) {
1933 /*
1934 * Hack fileio context to enable direct calls to vb2 ioctl
1935 * interface.
1936 */
1937 q->fileio = NULL;
1938
1939 vb2_streamoff(q, q->type);
1940 fileio->req.count = 0;
1941 vb2_reqbufs(q, &fileio->req);
1942 kfree(fileio);
1943 dprintk(3, "file io emulator closed\n");
1944 }
1945 return 0;
1946}
1947
1948/**
1949 * __vb2_perform_fileio() - perform a single file io (read or write) operation
1950 * @q: videobuf2 queue
1951 * @data: pointed to target userspace buffer
1952 * @count: number of bytes to read or write
1953 * @ppos: file handle position tracking pointer
1954 * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking)
1955 * @read: access mode selector (1 means read, 0 means write)
1956 */
1957static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
1958 loff_t *ppos, int nonblock, int read)
1959{
1960 struct vb2_fileio_data *fileio;
1961 struct vb2_fileio_buf *buf;
1962 int ret, index;
1963
1964 dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
1965 read ? "read" : "write", (long)*ppos, count,
1966 nonblock ? "non" : "");
1967
1968 if (!data)
1969 return -EINVAL;
1970
1971 /*
1972 * Initialize emulator on first call.
1973 */
1974 if (!q->fileio) {
1975 ret = __vb2_init_fileio(q, read);
1976 dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
1977 if (ret)
1978 return ret;
1979 }
1980 fileio = q->fileio;
1981
1982 /*
1983 * Hack fileio context to enable direct calls to vb2 ioctl interface.
1984 * The pointer will be restored before returning from this function.
1985 */
1986 q->fileio = NULL;
1987
1988 index = fileio->index;
1989 buf = &fileio->bufs[index];
1990
1991 /*
1992 * Check if we need to dequeue the buffer.
1993 */
1994 if (buf->queued) {
1995 struct vb2_buffer *vb;
1996
1997 /*
1998 * Call vb2_dqbuf to get buffer back.
1999 */
2000 memset(&fileio->b, 0, sizeof(fileio->b));
2001 fileio->b.type = q->type;
2002 fileio->b.memory = q->memory;
2003 fileio->b.index = index;
2004 ret = vb2_dqbuf(q, &fileio->b, nonblock);
2005 dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
2006 if (ret)
2007 goto end;
2008 fileio->dq_count += 1;
2009
2010 /*
2011 * Get number of bytes filled by the driver
2012 */
2013 vb = q->bufs[index];
2014 buf->size = vb2_get_plane_payload(vb, 0);
2015 buf->queued = 0;
2016 }
2017
2018 /*
2019 * Limit count on last few bytes of the buffer.
2020 */
2021 if (buf->pos + count > buf->size) {
2022 count = buf->size - buf->pos;
2023 dprintk(5, "reducing read count: %zd\n", count);
2024 }
2025
2026 /*
2027 * Transfer data to userspace.
2028 */
2029 dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
2030 count, index, buf->pos);
2031 if (read)
2032 ret = copy_to_user(data, buf->vaddr + buf->pos, count);
2033 else
2034 ret = copy_from_user(buf->vaddr + buf->pos, data, count);
2035 if (ret) {
2036 dprintk(3, "file io: error copying data\n");
2037 ret = -EFAULT;
2038 goto end;
2039 }
2040
2041 /*
2042 * Update counters.
2043 */
2044 buf->pos += count;
2045 *ppos += count;
2046
2047 /*
2048 * Queue next buffer if required.
2049 */
2050 if (buf->pos == buf->size ||
2051 (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
2052 /*
2053 * Check if this is the last buffer to read.
2054 */
2055 if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
2056 fileio->dq_count == 1) {
2057 dprintk(3, "file io: read limit reached\n");
2058 /*
2059 * Restore fileio pointer and release the context.
2060 */
2061 q->fileio = fileio;
2062 return __vb2_cleanup_fileio(q);
2063 }
2064
2065 /*
2066 * Call vb2_qbuf and give buffer to the driver.
2067 */
2068 memset(&fileio->b, 0, sizeof(fileio->b));
2069 fileio->b.type = q->type;
2070 fileio->b.memory = q->memory;
2071 fileio->b.index = index;
2072 fileio->b.bytesused = buf->pos;
2073 ret = vb2_qbuf(q, &fileio->b);
2074 dprintk(5, "file io: vb2_dbuf result: %d\n", ret);
2075 if (ret)
2076 goto end;
2077
2078 /*
2079 * Buffer has been queued, update the status
2080 */
2081 buf->pos = 0;
2082 buf->queued = 1;
2083 buf->size = q->bufs[0]->v4l2_planes[0].length;
2084 fileio->q_count += 1;
2085
2086 /*
2087 * Switch to the next buffer
2088 */
2089 fileio->index = (index + 1) % q->num_buffers;
2090
2091 /*
2092 * Start streaming if required.
2093 */
2094 if (!read && !q->streaming) {
2095 ret = vb2_streamon(q, q->type);
2096 if (ret)
2097 goto end;
2098 }
2099 }
2100
2101 /*
2102 * Return proper number of bytes processed.
2103 */
2104 if (ret == 0)
2105 ret = count;
2106end:
2107 /*
2108 * Restore the fileio context and block vb2 ioctl interface.
2109 */
2110 q->fileio = fileio;
2111 return ret;
2112}
2113
/* Implements the read() file operation via the file I/O emulator. */
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);
2120
/* Implements the write() file operation via the file I/O emulator. */
size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
2127
2128
2129/*
2130 * The following functions are not part of the vb2 core API, but are helper
2131 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
2132 * and struct vb2_ops.
2133 * They contain boilerplate code that most if not all drivers have to do
2134 * and so they simplify the driver code.
2135 */
2136
2137/* The queue is busy if there is a owner and you are not that owner. */
2138static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
2139{
2140 return vdev->queue->owner && vdev->queue->owner != file->private_data;
2141}
2142
2143/* vb2 ioctl helpers */
2144
2145int vb2_ioctl_reqbufs(struct file *file, void *priv,
2146 struct v4l2_requestbuffers *p)
2147{
2148 struct video_device *vdev = video_devdata(file);
2149 int res = __verify_memory_type(vdev->queue, p->memory, p->type);
2150
2151 if (res)
2152 return res;
2153 if (vb2_queue_is_busy(vdev, file))
2154 return -EBUSY;
2155 res = __reqbufs(vdev->queue, p);
2156 /* If count == 0, then the owner has released all buffers and he
2157 is no longer owner of the queue. Otherwise we have a new owner. */
2158 if (res == 0)
2159 vdev->queue->owner = p->count ? file->private_data : NULL;
2160 return res;
2161}
2162EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
2163
2164int vb2_ioctl_create_bufs(struct file *file, void *priv,
2165 struct v4l2_create_buffers *p)
2166{
2167 struct video_device *vdev = video_devdata(file);
2168 int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
2169
2170 p->index = vdev->queue->num_buffers;
2171 /* If count == 0, then just check if memory and type are valid.
2172 Any -EBUSY result from __verify_memory_type can be mapped to 0. */
2173 if (p->count == 0)
2174 return res != -EBUSY ? res : 0;
2175 if (res)
2176 return res;
2177 if (vb2_queue_is_busy(vdev, file))
2178 return -EBUSY;
2179 res = __create_bufs(vdev->queue, p);
2180 if (res == 0)
2181 vdev->queue->owner = file->private_data;
2182 return res;
2183}
2184EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
2185
/* vidioc_prepare_buf helper: forwards to vb2_prepare_buf() when the caller
 * owns the queue. */
int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
2196
/* vidioc_querybuf helper: forwards to vb2_querybuf(). */
int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
2205
/* vidioc_qbuf helper: forwards to vb2_qbuf() when the caller owns the queue. */
int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
2215
/* vidioc_dqbuf helper: forwards to vb2_dqbuf() when the caller owns the
 * queue; honours O_NONBLOCK on the filehandle. */
int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
2225
/* vidioc_streamon helper: forwards to vb2_streamon() when the caller owns
 * the queue. */
int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
2235
/* vidioc_streamoff helper: forwards to vb2_streamoff() when the caller owns
 * the queue. */
int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
2245
2246/* v4l2_file_operations helpers */
2247
/* mmap file operation helper: forwards to vb2_mmap() on the device's queue. */
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);
2255
2256int vb2_fop_release(struct file *file)
2257{
2258 struct video_device *vdev = video_devdata(file);
2259
2260 if (file->private_data == vdev->queue->owner) {
2261 vb2_queue_release(vdev->queue);
2262 vdev->queue->owner = NULL;
2263 }
2264 return v4l2_fh_release(file);
2265}
2266EXPORT_SYMBOL_GPL(vb2_fop_release);
2267
2268ssize_t vb2_fop_write(struct file *file, char __user *buf,
2269 size_t count, loff_t *ppos)
2270{
2271 struct video_device *vdev = video_devdata(file);
2272 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2273 int err = -EBUSY;
2274
2275 if (lock && mutex_lock_interruptible(lock))
2276 return -ERESTARTSYS;
2277 if (vb2_queue_is_busy(vdev, file))
2278 goto exit;
2279 err = vb2_write(vdev->queue, buf, count, ppos,
2280 file->f_flags & O_NONBLOCK);
2281 if (err >= 0)
2282 vdev->queue->owner = file->private_data;
2283exit:
2284 if (lock)
2285 mutex_unlock(lock);
2286 return err;
2287}
2288EXPORT_SYMBOL_GPL(vb2_fop_write);
2289
2290ssize_t vb2_fop_read(struct file *file, char __user *buf,
2291 size_t count, loff_t *ppos)
2292{
2293 struct video_device *vdev = video_devdata(file);
2294 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2295 int err = -EBUSY;
2296
2297 if (lock && mutex_lock_interruptible(lock))
2298 return -ERESTARTSYS;
2299 if (vb2_queue_is_busy(vdev, file))
2300 goto exit;
2301 err = vb2_read(vdev->queue, buf, count, ppos,
2302 file->f_flags & O_NONBLOCK);
2303 if (err >= 0)
2304 vdev->queue->owner = file->private_data;
2305exit:
2306 if (lock)
2307 mutex_unlock(lock);
2308 return err;
2309}
2310EXPORT_SYMBOL_GPL(vb2_fop_read);
2311
/* poll file operation helper: forwards to vb2_poll(), taking the lock only
 * when polling may start the file I/O emulator (and thus mutate the queue). */
unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	unsigned long req_events = poll_requested_events(wait);
	unsigned res;
	void *fileio;
	bool must_lock = false;

	/* Try to be smart: only lock if polling might start fileio,
	   otherwise locking will only introduce unwanted delays. */
	if (q->num_buffers == 0 && q->fileio == NULL) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM)))
			must_lock = true;
		else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM)))
			must_lock = true;
	}

	/* If locking is needed, but this helper doesn't know how, then you
	   shouldn't be using this helper but you should write your own. */
	WARN_ON(must_lock && !lock);

	if (must_lock && lock && mutex_lock_interruptible(lock))
		return POLLERR;

	/* Remember whether fileio was already active before vb2_poll(). */
	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (must_lock && !fileio && q->fileio)
		q->owner = file->private_data;
	if (must_lock && lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);
2352
#ifndef CONFIG_MMU
/* get_unmapped_area file operation helper for no-MMU systems: forwards to
 * vb2_get_unmapped_area() on the device's queue. */
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif
2363
2364/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
2365
/* wait_prepare vb2_op helper: drops vq->lock before vb2 sleeps waiting for
 * a buffer. Only valid when vq->lock is non-NULL. */
void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
2371
/* wait_finish vb2_op helper: re-takes vq->lock after vb2 wakes up. Only
 * valid when vq->lock is non-NULL. */
void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
2377
2378MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
2379MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
2380MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
new file mode 100644
index 000000000000..4b7132660a93
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -0,0 +1,186 @@
1/*
2 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Pawel Osciak <pawel@osciak.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/dma-mapping.h>
16
17#include <media/videobuf2-core.h>
18#include <media/videobuf2-dma-contig.h>
19#include <media/videobuf2-memops.h>
20
/* Allocator context: remembers the device to allocate coherent DMA memory for. */
struct vb2_dc_conf {
	struct device		*dev;	/* device passed to dma_alloc_coherent() */
};
24
/* Per-buffer state for the DMA-contig allocator. */
struct vb2_dc_buf {
	struct vb2_dc_conf		*conf;		/* owning allocator context */
	void				*vaddr;		/* kernel virtual address */
	dma_addr_t			dma_addr;	/* bus address for the device */
	unsigned long			size;		/* allocation size in bytes */
	struct vm_area_struct		*vma;		/* VMA pinned for USERPTR buffers */
	atomic_t			refcount;	/* users incl. userspace mappings */
	struct vb2_vmarea_handler	handler;	/* vm_ops glue for mmap refcounting */
};
34
35static void vb2_dma_contig_put(void *buf_priv);
36
37static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
38{
39 struct vb2_dc_conf *conf = alloc_ctx;
40 struct vb2_dc_buf *buf;
41
42 buf = kzalloc(sizeof *buf, GFP_KERNEL);
43 if (!buf)
44 return ERR_PTR(-ENOMEM);
45
46 buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->dma_addr,
47 GFP_KERNEL);
48 if (!buf->vaddr) {
49 dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
50 size);
51 kfree(buf);
52 return ERR_PTR(-ENOMEM);
53 }
54
55 buf->conf = conf;
56 buf->size = size;
57
58 buf->handler.refcount = &buf->refcount;
59 buf->handler.put = vb2_dma_contig_put;
60 buf->handler.arg = buf;
61
62 atomic_inc(&buf->refcount);
63
64 return buf;
65}
66
67static void vb2_dma_contig_put(void *buf_priv)
68{
69 struct vb2_dc_buf *buf = buf_priv;
70
71 if (atomic_dec_and_test(&buf->refcount)) {
72 dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
73 buf->dma_addr);
74 kfree(buf);
75 }
76}
77
/* The allocator "cookie" is a pointer to the buffer's DMA (bus) address. */
static void *vb2_dma_contig_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
84
85static void *vb2_dma_contig_vaddr(void *buf_priv)
86{
87 struct vb2_dc_buf *buf = buf_priv;
88 if (!buf)
89 return NULL;
90
91 return buf->vaddr;
92}
93
/* Number of current users of the buffer (allocator plus live mappings). */
static unsigned int vb2_dma_contig_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}
100
101static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
102{
103 struct vb2_dc_buf *buf = buf_priv;
104
105 if (!buf) {
106 printk(KERN_ERR "No buffer to map\n");
107 return -EINVAL;
108 }
109
110 return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
111 &vb2_common_vm_ops, &buf->handler);
112}
113
114static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
115 unsigned long size, int write)
116{
117 struct vb2_dc_buf *buf;
118 struct vm_area_struct *vma;
119 dma_addr_t dma_addr = 0;
120 int ret;
121
122 buf = kzalloc(sizeof *buf, GFP_KERNEL);
123 if (!buf)
124 return ERR_PTR(-ENOMEM);
125
126 ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
127 if (ret) {
128 printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
129 vaddr);
130 kfree(buf);
131 return ERR_PTR(ret);
132 }
133
134 buf->size = size;
135 buf->dma_addr = dma_addr;
136 buf->vma = vma;
137
138 return buf;
139}
140
141static void vb2_dma_contig_put_userptr(void *mem_priv)
142{
143 struct vb2_dc_buf *buf = mem_priv;
144
145 if (!buf)
146 return;
147
148 vb2_put_vma(buf->vma);
149 kfree(buf);
150}
151
/*
 * Memory-operation table plugged into struct vb2_queue (q->mem_ops) by
 * drivers that require physically contiguous, coherent DMA buffers.
 */
const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dma_contig_alloc,
	.put		= vb2_dma_contig_put,
	.cookie		= vb2_dma_contig_cookie,
	.vaddr		= vb2_dma_contig_vaddr,
	.mmap		= vb2_dma_contig_mmap,
	.get_userptr	= vb2_dma_contig_get_userptr,
	.put_userptr	= vb2_dma_contig_put_userptr,
	.num_users	= vb2_dma_contig_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
163
164void *vb2_dma_contig_init_ctx(struct device *dev)
165{
166 struct vb2_dc_conf *conf;
167
168 conf = kzalloc(sizeof *conf, GFP_KERNEL);
169 if (!conf)
170 return ERR_PTR(-ENOMEM);
171
172 conf->dev = dev;
173
174 return conf;
175}
176EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
177
/* Free an allocator context created by vb2_dma_contig_init_ctx(). */
void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
183
184MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
185MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
186MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
new file mode 100644
index 000000000000..25c3b360e1ad
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -0,0 +1,283 @@
1/*
2 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/mm.h>
15#include <linux/scatterlist.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/vmalloc.h>
19
20#include <media/videobuf2-core.h>
21#include <media/videobuf2-memops.h>
22#include <media/videobuf2-dma-sg.h>
23
/*
 * Per-buffer bookkeeping for the scatter/gather allocator.
 */
struct vb2_dma_sg_buf {
	void			*vaddr;		/* kernel mapping from vm_map_ram(); created lazily */
	struct page		**pages;	/* array of sg_desc.num_pages page pointers */
	int			write;		/* non-zero if user pages were pinned for writing */
	int			offset;		/* data offset within the first page (USERPTR only) */
	struct vb2_dma_sg_desc	sg_desc;	/* scatterlist handed out via the cookie() op */
	atomic_t		refcount;	/* allocation + active mmap() users */
	struct vb2_vmarea_handler handler;	/* refcount glue for vb2_common_vm_ops */
};
33
34static void vb2_dma_sg_put(void *buf_priv);
35
36static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
37{
38 struct vb2_dma_sg_buf *buf;
39 int i;
40
41 buf = kzalloc(sizeof *buf, GFP_KERNEL);
42 if (!buf)
43 return NULL;
44
45 buf->vaddr = NULL;
46 buf->write = 0;
47 buf->offset = 0;
48 buf->sg_desc.size = size;
49 buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
50
51 buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
52 sizeof(*buf->sg_desc.sglist));
53 if (!buf->sg_desc.sglist)
54 goto fail_sglist_alloc;
55 sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
56
57 buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
58 GFP_KERNEL);
59 if (!buf->pages)
60 goto fail_pages_array_alloc;
61
62 for (i = 0; i < buf->sg_desc.num_pages; ++i) {
63 buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
64 if (NULL == buf->pages[i])
65 goto fail_pages_alloc;
66 sg_set_page(&buf->sg_desc.sglist[i],
67 buf->pages[i], PAGE_SIZE, 0);
68 }
69
70 buf->handler.refcount = &buf->refcount;
71 buf->handler.put = vb2_dma_sg_put;
72 buf->handler.arg = buf;
73
74 atomic_inc(&buf->refcount);
75
76 printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
77 __func__, buf->sg_desc.num_pages);
78 return buf;
79
80fail_pages_alloc:
81 while (--i >= 0)
82 __free_page(buf->pages[i]);
83 kfree(buf->pages);
84
85fail_pages_array_alloc:
86 vfree(buf->sg_desc.sglist);
87
88fail_sglist_alloc:
89 kfree(buf);
90 return NULL;
91}
92
93static void vb2_dma_sg_put(void *buf_priv)
94{
95 struct vb2_dma_sg_buf *buf = buf_priv;
96 int i = buf->sg_desc.num_pages;
97
98 if (atomic_dec_and_test(&buf->refcount)) {
99 printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
100 buf->sg_desc.num_pages);
101 if (buf->vaddr)
102 vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
103 vfree(buf->sg_desc.sglist);
104 while (--i >= 0)
105 __free_page(buf->pages[i]);
106 kfree(buf->pages);
107 kfree(buf);
108 }
109}
110
/*
 * vb2_dma_sg_get_userptr - pin a userspace buffer and build a scatterlist
 *
 * Pins every page of the [vaddr, vaddr + size) range with get_user_pages()
 * and describes the range in a scatterlist, honouring a non-page-aligned
 * start via buf->offset. Returns the buffer handle or NULL on failure.
 */
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user, i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;	/* data may start mid-page */
	buf->sg_desc.size = size;

	/* Number of pages the range touches, counting partial pages. */
	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->sg_desc.num_pages = last - first + 1;

	buf->sg_desc.sglist = vzalloc(
		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto userptr_fail_sglist_alloc;

	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_pages_array_alloc;

	/* NOTE(review): assumes mmap_sem is held by the vb2 core — confirm. */
	num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->sg_desc.num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->sg_desc.num_pages)
		goto userptr_fail_get_user_pages;

	/*
	 * First entry covers the first page from buf->offset onward; the
	 * remaining entries cover whole pages, clamped by the bytes left.
	 * NOTE(review): when offset + size <= PAGE_SIZE the first entry is
	 * PAGE_SIZE - offset long, which can exceed size — confirm callers
	 * do not rely on the sglist lengths summing exactly to size then.
	 */
	sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
		    PAGE_SIZE - buf->offset, buf->offset);
	size -= PAGE_SIZE - buf->offset;
	for (i = 1; i < buf->sg_desc.num_pages; ++i) {
		sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
			    min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return buf;

userptr_fail_get_user_pages:
	printk(KERN_DEBUG "get_user_pages requested/got: %d/%d]\n",
	       num_pages_from_user, buf->sg_desc.num_pages);
	/* Release only the pages that were actually pinned. */
	while (--num_pages_from_user >= 0)
		put_page(buf->pages[num_pages_from_user]);
	kfree(buf->pages);

userptr_fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
	kfree(buf);
	return NULL;
}
178
179/*
180 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
181 * be used
182 */
183static void vb2_dma_sg_put_userptr(void *buf_priv)
184{
185 struct vb2_dma_sg_buf *buf = buf_priv;
186 int i = buf->sg_desc.num_pages;
187
188 printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
189 __func__, buf->sg_desc.num_pages);
190 if (buf->vaddr)
191 vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
192 while (--i >= 0) {
193 if (buf->write)
194 set_page_dirty_lock(buf->pages[i]);
195 put_page(buf->pages[i]);
196 }
197 vfree(buf->sg_desc.sglist);
198 kfree(buf->pages);
199 kfree(buf);
200}
201
202static void *vb2_dma_sg_vaddr(void *buf_priv)
203{
204 struct vb2_dma_sg_buf *buf = buf_priv;
205
206 BUG_ON(!buf);
207
208 if (!buf->vaddr)
209 buf->vaddr = vm_map_ram(buf->pages,
210 buf->sg_desc.num_pages,
211 -1,
212 PAGE_KERNEL);
213
214 /* add offset in case userptr is not page-aligned */
215 return buf->vaddr + buf->offset;
216}
217
218static unsigned int vb2_dma_sg_num_users(void *buf_priv)
219{
220 struct vb2_dma_sg_buf *buf = buf_priv;
221
222 return atomic_read(&buf->refcount);
223}
224
225static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
226{
227 struct vb2_dma_sg_buf *buf = buf_priv;
228 unsigned long uaddr = vma->vm_start;
229 unsigned long usize = vma->vm_end - vma->vm_start;
230 int i = 0;
231
232 if (!buf) {
233 printk(KERN_ERR "No memory to map\n");
234 return -EINVAL;
235 }
236
237 do {
238 int ret;
239
240 ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
241 if (ret) {
242 printk(KERN_ERR "Remapping memory, error: %d\n", ret);
243 return ret;
244 }
245
246 uaddr += PAGE_SIZE;
247 usize -= PAGE_SIZE;
248 } while (usize > 0);
249
250
251 /*
252 * Use common vm_area operations to track buffer refcount.
253 */
254 vma->vm_private_data = &buf->handler;
255 vma->vm_ops = &vb2_common_vm_ops;
256
257 vma->vm_ops->open(vma);
258
259 return 0;
260}
261
262static void *vb2_dma_sg_cookie(void *buf_priv)
263{
264 struct vb2_dma_sg_buf *buf = buf_priv;
265
266 return &buf->sg_desc;
267}
268
/*
 * vb2 memory operations for the scatter/gather allocator; drivers plug
 * this table into their vb2_queue as mem_ops.
 */
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
new file mode 100644
index 000000000000..504cd4cbe29e
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -0,0 +1,227 @@
1/*
2 * videobuf2-memops.c - generic memory handling routines for videobuf2
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Pawel Osciak <pawel@osciak.com>
7 * Marek Szyprowski <m.szyprowski@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation.
12 */
13
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/dma-mapping.h>
17#include <linux/vmalloc.h>
18#include <linux/mm.h>
19#include <linux/sched.h>
20#include <linux/file.h>
21
22#include <media/videobuf2-core.h>
23#include <media/videobuf2-memops.h>
24
/**
 * vb2_get_vma() - acquire and lock the virtual memory area
 * @vma: given virtual memory area
 *
 * This function attempts to acquire an area mapped in the userspace for
 * the duration of a hardware operation. The area is "locked" by performing
 * the same set of operations that are done when a process calls fork() and
 * memory areas are duplicated.
 *
 * Returns a copy of a virtual memory region on success or NULL.
 */
struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (vma_copy == NULL)
		return NULL;

	/* Mimic fork(): notify the area's owner and pin the backing file. */
	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	/* The copy is stand-alone: detach it from the mm and the vma list. */
	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}
EXPORT_SYMBOL_GPL(vb2_get_vma);
59
/**
 * vb2_put_vma() - release a userspace virtual memory area
 * @vma: virtual memory region associated with the area to be released
 *
 * This function releases the previously acquired memory area after a hardware
 * operation. It undoes vb2_get_vma(): notifies the area's owner, drops the
 * file reference and frees the copy. (The kernel-doc previously named this
 * "vb2_put_userptr()", which does not exist.)
 */
void vb2_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}
EXPORT_SYMBOL_GPL(vb2_put_vma);
81
/**
 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
 * @vaddr:	starting virtual address of the area to be verified
 * @size:	size of the area
 * @res_vma:	will return locked copy of struct vm_area for the given area
 * @res_pa:	will return physical address for the given vaddr
 *
 * This function will go through memory area of size @size mapped at @vaddr and
 * verify that the underlying physical pages are contiguous. If they are
 * contiguous the virtual memory area is locked and a @res_vma is filled with
 * the copy and @res_pa set to the physical address of the buffer.
 *
 * Returns 0 on success.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
			struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long offset, start, end;
	unsigned long this_pfn, prev_pfn;
	dma_addr_t pa = 0;

	start = vaddr;
	offset = start & ~PAGE_MASK;	/* sub-page offset of the data */
	end = start + size;

	vma = find_vma(mm, start);

	/* The whole [start, end) range must lie within a single vma. */
	if (vma == NULL || vma->vm_end < end)
		return -EFAULT;

	/* Walk the range page by page, checking the pfns are consecutive. */
	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
		int ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			return ret;

		if (prev_pfn == 0)
			pa = this_pfn << PAGE_SHIFT;	/* first page: record base */
		else if (this_pfn != prev_pfn + 1)
			return -EFAULT;			/* gap: not contiguous */

		prev_pfn = this_pfn;
	}

	/*
	 * Memory is contiguous, lock vma and return to the caller
	 */
	*res_vma = vb2_get_vma(vma);
	if (*res_vma == NULL)
		return -ENOMEM;

	*res_pa = pa + offset;
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
138
139/**
140 * vb2_mmap_pfn_range() - map physical pages to userspace
141 * @vma: virtual memory region for the mapping
142 * @paddr: starting physical address of the memory to be mapped
143 * @size: size of the memory to be mapped
144 * @vm_ops: vm operations to be assigned to the created area
145 * @priv: private data to be associated with the area
146 *
147 * Returns 0 on success.
148 */
149int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
150 unsigned long size,
151 const struct vm_operations_struct *vm_ops,
152 void *priv)
153{
154 int ret;
155
156 size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
157
158 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
159 ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
160 size, vma->vm_page_prot);
161 if (ret) {
162 printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
163 return ret;
164 }
165
166 vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
167 vma->vm_private_data = priv;
168 vma->vm_ops = vm_ops;
169
170 vma->vm_ops->open(vma);
171
172 pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
173 __func__, paddr, vma->vm_start, size);
174
175 return 0;
176}
177EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
178
179/**
180 * vb2_common_vm_open() - increase refcount of the vma
181 * @vma: virtual memory region for the mapping
182 *
183 * This function adds another user to the provided vma. It expects
184 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
185 */
186static void vb2_common_vm_open(struct vm_area_struct *vma)
187{
188 struct vb2_vmarea_handler *h = vma->vm_private_data;
189
190 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
191 __func__, h, atomic_read(h->refcount), vma->vm_start,
192 vma->vm_end);
193
194 atomic_inc(h->refcount);
195}
196
197/**
198 * vb2_common_vm_close() - decrease refcount of the vma
199 * @vma: virtual memory region for the mapping
200 *
201 * This function releases the user from the provided vma. It expects
202 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
203 */
204static void vb2_common_vm_close(struct vm_area_struct *vma)
205{
206 struct vb2_vmarea_handler *h = vma->vm_private_data;
207
208 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
209 __func__, h, atomic_read(h->refcount), vma->vm_start,
210 vma->vm_end);
211
212 h->put(h->arg);
213}
214
/**
 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmapped
 * video buffers
 */
const struct vm_operations_struct vb2_common_vm_ops = {
	.open = vb2_common_vm_open,
	.close = vb2_common_vm_close,
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);

MODULE_DESCRIPTION("common memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
new file mode 100644
index 000000000000..94efa04d8d55
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -0,0 +1,223 @@
1/*
2 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Pawel Osciak <pawel@osciak.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 */
12
13#include <linux/io.h>
14#include <linux/module.h>
15#include <linux/mm.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/vmalloc.h>
19
20#include <media/videobuf2-core.h>
21#include <media/videobuf2-vmalloc.h>
22#include <media/videobuf2-memops.h>
23
/*
 * Per-buffer state for the vmalloc allocator.
 */
struct vb2_vmalloc_buf {
	void			*vaddr;		/* kernel address (includes sub-page offset for USERPTR) */
	struct page		**pages;	/* pinned user pages (get_user_pages path), else NULL */
	struct vm_area_struct	*vma;		/* locked vma copy (VM_PFNMAP/ioremap path), else NULL */
	int			write;		/* pages pinned writable */
	unsigned long		size;		/* buffer size in bytes */
	unsigned int		n_pages;	/* number of entries in pages[] */
	atomic_t		refcount;	/* allocation + active mmap() users */
	struct vb2_vmarea_handler handler;	/* refcount glue for vb2_common_vm_ops */
};
34
35static void vb2_vmalloc_put(void *buf_priv);
36
37static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size)
38{
39 struct vb2_vmalloc_buf *buf;
40
41 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
42 if (!buf)
43 return NULL;
44
45 buf->size = size;
46 buf->vaddr = vmalloc_user(buf->size);
47 buf->handler.refcount = &buf->refcount;
48 buf->handler.put = vb2_vmalloc_put;
49 buf->handler.arg = buf;
50
51 if (!buf->vaddr) {
52 pr_debug("vmalloc of size %ld failed\n", buf->size);
53 kfree(buf);
54 return NULL;
55 }
56
57 atomic_inc(&buf->refcount);
58 return buf;
59}
60
61static void vb2_vmalloc_put(void *buf_priv)
62{
63 struct vb2_vmalloc_buf *buf = buf_priv;
64
65 if (atomic_dec_and_test(&buf->refcount)) {
66 vfree(buf->vaddr);
67 kfree(buf);
68 }
69}
70
/*
 * vb2_vmalloc_get_userptr - make a userspace buffer kernel-accessible
 *
 * Two cases are handled:
 *  - the range lies in a VM_PFNMAP vma with a non-zero pgoff: it must be
 *    physically contiguous; the vma is locked and the range ioremapped;
 *  - a regular mapping: the pages are pinned with get_user_pages() and
 *    mapped with vm_map_ram().
 * Returns the buffer handle or NULL on any failure.
 */
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
				     unsigned long size, int write)
{
	struct vb2_vmalloc_buf *buf;
	unsigned long first, last;
	int n_pages, offset;
	struct vm_area_struct *vma;
	dma_addr_t physp;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->write = write;
	offset = vaddr & ~PAGE_MASK;	/* data may start mid-page */
	buf->size = size;


	vma = find_vma(current->mm, vaddr);
	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
		/* PFNMAP path: contiguous device memory, access via ioremap. */
		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
			goto fail_pages_array_alloc;
		buf->vma = vma;
		buf->vaddr = ioremap_nocache(physp, size);
		if (!buf->vaddr)
			goto fail_pages_array_alloc;
	} else {
		/* Regular path: pin pages and map them into kernel space. */
		first = vaddr >> PAGE_SHIFT;
		last = (vaddr + size - 1) >> PAGE_SHIFT;
		buf->n_pages = last - first + 1;
		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
				     GFP_KERNEL);
		if (!buf->pages)
			goto fail_pages_array_alloc;

		/* current->mm->mmap_sem is taken by videobuf2 core */
		n_pages = get_user_pages(current, current->mm,
					 vaddr & PAGE_MASK, buf->n_pages,
					 write, 1, /* force */
					 buf->pages, NULL);
		if (n_pages != buf->n_pages)
			goto fail_get_user_pages;

		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
					PAGE_KERNEL);
		if (!buf->vaddr)
			goto fail_get_user_pages;
	}

	/* vaddr keeps the sub-page offset so it points at the data start. */
	buf->vaddr += offset;
	return buf;

fail_get_user_pages:
	pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
		 buf->n_pages);
	/* Release only the pages that were actually pinned. */
	while (--n_pages >= 0)
		put_page(buf->pages[n_pages]);
	kfree(buf->pages);

fail_pages_array_alloc:
	kfree(buf);

	return NULL;
}
135
/* Undo vb2_vmalloc_get_userptr() for whichever path created the buffer. */
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	/* Page-aligned base of the mapping (vaddr carries the offset). */
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;

	if (buf->pages) {
		/* get_user_pages() path: unmap, dirty and release pages. */
		if (vaddr)
			vm_unmap_ram((void *)vaddr, buf->n_pages);
		for (i = 0; i < buf->n_pages; ++i) {
			if (buf->write)
				set_page_dirty_lock(buf->pages[i]);
			put_page(buf->pages[i]);
		}
		kfree(buf->pages);
	} else {
		/* ioremap (PFNMAP) path. */
		if (buf->vma)
			vb2_put_vma(buf->vma);
		/*
		 * NOTE(review): buf->vaddr still includes the sub-page offset
		 * added in get_userptr; iounmap() of an offset pointer looks
		 * suspicious — confirm offset is always 0 on this path.
		 */
		iounmap(buf->vaddr);
	}
	kfree(buf);
}
158
159static void *vb2_vmalloc_vaddr(void *buf_priv)
160{
161 struct vb2_vmalloc_buf *buf = buf_priv;
162
163 if (!buf->vaddr) {
164 pr_err("Address of an unallocated plane requested "
165 "or cannot map user pointer\n");
166 return NULL;
167 }
168
169 return buf->vaddr;
170}
171
172static unsigned int vb2_vmalloc_num_users(void *buf_priv)
173{
174 struct vb2_vmalloc_buf *buf = buf_priv;
175 return atomic_read(&buf->refcount);
176}
177
178static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
179{
180 struct vb2_vmalloc_buf *buf = buf_priv;
181 int ret;
182
183 if (!buf) {
184 pr_err("No memory to map\n");
185 return -EINVAL;
186 }
187
188 ret = remap_vmalloc_range(vma, buf->vaddr, 0);
189 if (ret) {
190 pr_err("Remapping vmalloc memory, error: %d\n", ret);
191 return ret;
192 }
193
194 /*
195 * Make sure that vm_areas for 2 buffers won't be merged together
196 */
197 vma->vm_flags |= VM_DONTEXPAND;
198
199 /*
200 * Use common vm_area operations to track buffer refcount.
201 */
202 vma->vm_private_data = &buf->handler;
203 vma->vm_ops = &vb2_common_vm_ops;
204
205 vma->vm_ops->open(vma);
206
207 return 0;
208}
209
/*
 * vb2 memory operations for vmalloc()-backed buffers; drivers plug this
 * table into their vb2_queue as mem_ops.
 */
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");