aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iio
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-05-22 19:34:21 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-05-22 19:34:21 -0400
commitfb09bafda67041b74a668dc9d77735e36bd33d3b (patch)
tree2dd32b65062a95045468fdcab366ecdb8e4fcac6 /drivers/iio
parent94b5aff4c6f72fee6b0f49d49e4fa8b204e8ded9 (diff)
parentc3c6cc91b0ae7b3d598488ad0b593bafba4a0817 (diff)
Merge tag 'staging-3.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
Pull staging tree changes from Greg Kroah-Hartman: "Here is the big staging tree pull request for the 3.5-rc1 merge window. Loads of changes here, and we just narrowly added more lines than we removed: 622 files changed, 28356 insertions(+), 26059 deletions(-) But, good news is that there is a number of subsystems that moved out of the staging tree, to their respective "real" portions of the kernel. Code that moved out was: - iio core code - mei driver - vme core and bridge drivers There was one broken network driver that moved into staging as a step before it is removed from the tree (pc300), and there were a few new drivers added to the tree: - new iio drivers - gdm72xx wimax USB driver - ipack subsystem and 2 drivers All of the movements around have acks from the various subsystem maintainers, and all of this has been in the linux-next tree for a while. Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>" Fixed up various trivial conflicts, along with a non-trivial one found in -next and pointed out by Olof Johansson: a clean - but incorrect - merge of the arch/arm/boot/dts/at91sam9g20.dtsi file. Fix up manually as per Stephen Rothwell. * tag 'staging-3.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (536 commits) Staging: bcm: Remove two unused variables from Adapter.h Staging: bcm: Removes the volatile type definition from Adapter.h Staging: bcm: Rename all "INT" to "int" in Adapter.h Staging: bcm: Fix warning: __packed vs. __attribute__((packed)) in Adapter.h Staging: bcm: Correctly format all comments in Adapter.h Staging: bcm: Fix all whitespace issues in Adapter.h Staging: bcm: Properly format braces in Adapter.h Staging: ipack/bridges/tpci200: remove unneeded casts Staging: ipack/bridges/tpci200: remove TPCI200_SHORTNAME constant Staging: ipack: remove board_name and bus_name fields from struct ipack_device Staging: ipack: improve the register of a bus and a device in the bus. 
staging: comedi: cleanup all the comedi_driver 'detach' functions staging: comedi: remove all 'default N' in Kconfig staging: line6/config.h: Delete unused header staging: gdm72xx depends on NET staging: gdm72xx: Set up parent link in sysfs for gdm72xx devices staging: drm/omap: initial dmabuf/prime import support staging: drm/omap: dmabuf/prime mmap support pstore/ram: Add ECC support pstore/ram: Switch to persistent_ram routines ...
Diffstat (limited to 'drivers/iio')
-rw-r--r--drivers/iio/Kconfig54
-rw-r--r--drivers/iio/Makefile13
-rw-r--r--drivers/iio/adc/Kconfig16
-rw-r--r--drivers/iio/adc/Makefile5
-rw-r--r--drivers/iio/adc/at91_adc.c802
-rw-r--r--drivers/iio/amplifiers/Kconfig17
-rw-r--r--drivers/iio/amplifiers/Makefile5
-rw-r--r--drivers/iio/amplifiers/ad8366.c222
-rw-r--r--drivers/iio/iio_core.h62
-rw-r--r--drivers/iio/iio_core_trigger.h46
-rw-r--r--drivers/iio/industrialio-buffer.c755
-rw-r--r--drivers/iio/industrialio-core.c913
-rw-r--r--drivers/iio/industrialio-event.c453
-rw-r--r--drivers/iio/industrialio-trigger.c509
-rw-r--r--drivers/iio/inkern.c293
-rw-r--r--drivers/iio/kfifo_buf.c150
16 files changed, 4315 insertions, 0 deletions
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
new file mode 100644
index 000000000000..56eecefcec75
--- /dev/null
+++ b/drivers/iio/Kconfig
@@ -0,0 +1,54 @@
1#
2# Industrial I/O subsystem configuration
3#
4
5menuconfig IIO
6 tristate "Industrial I/O support"
7 depends on GENERIC_HARDIRQS
8 help
9 The industrial I/O subsystem provides a unified framework for
10 drivers for many different types of embedded sensors using a
11 number of different physical interfaces (i2c, spi, etc). See
12 Documentation/iio for more information.
13
14if IIO
15
16config IIO_BUFFER
17 bool "Enable buffer support within IIO"
18 help
19 Provide core support for various buffer based data
20 acquisition methods.
21
22if IIO_BUFFER
23
24config IIO_KFIFO_BUF
25 select IIO_TRIGGER
26 tristate "Industrial I/O buffering based on kfifo"
27 help
28 A simple fifo based on kfifo. Use this if you want a fifo
29 rather than a ring buffer. Note that this currently provides
30 no buffer events so it is up to userspace to work out how
31 often to read from the buffer.
32
33endif # IIO_BUFFER
34
35config IIO_TRIGGER
36 boolean "Enable triggered sampling support"
37 help
38 Provides IIO core support for triggers. Currently these
39 are used to initialize capture of samples to push into
40 ring buffers. The triggers are effectively a 'capture
41 data now' interrupt.
42
43config IIO_CONSUMERS_PER_TRIGGER
44 int "Maximum number of consumers per trigger"
45 depends on IIO_TRIGGER
46 default "2"
47 help
48 This value controls the maximum number of consumers that a
49 given trigger may handle. Default is 2.
50
51source "drivers/iio/adc/Kconfig"
52source "drivers/iio/amplifiers/Kconfig"
53
54endif # IIO
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
new file mode 100644
index 000000000000..e425afd1480c
--- /dev/null
+++ b/drivers/iio/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for the industrial I/O core.
3#
4
5obj-$(CONFIG_IIO) += industrialio.o
6industrialio-y := industrialio-core.o industrialio-event.o inkern.o
7industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
8industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
9
10obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
11
12obj-y += adc/
13obj-y += amplifiers/
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
new file mode 100644
index 000000000000..9a0df8123cc4
--- /dev/null
+++ b/drivers/iio/adc/Kconfig
@@ -0,0 +1,16 @@
1#
2# ADC drivers
3#
4menu "Analog to digital converters"
5
6config AT91_ADC
7 tristate "Atmel AT91 ADC"
8 depends on ARCH_AT91
9 select IIO_BUFFER
10 select IIO_KFIFO_BUF
11 select IIO_TRIGGER
12 select SYSFS
13 help
14 Say yes here to build support for Atmel AT91 ADC.
15
16endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
new file mode 100644
index 000000000000..175c8d41ea99
--- /dev/null
+++ b/drivers/iio/adc/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for IIO ADC drivers
3#
4
5obj-$(CONFIG_AT91_ADC) += at91_adc.o
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
new file mode 100644
index 000000000000..f18a95d80255
--- /dev/null
+++ b/drivers/iio/adc/at91_adc.c
@@ -0,0 +1,802 @@
1/*
2 * Driver for the ADC present in the Atmel AT91 evaluation boards.
3 *
4 * Copyright 2011 Free Electrons
5 *
6 * Licensed under the GPLv2 or later.
7 */
8
9#include <linux/bitmap.h>
10#include <linux/bitops.h>
11#include <linux/clk.h>
12#include <linux/err.h>
13#include <linux/io.h>
14#include <linux/interrupt.h>
15#include <linux/jiffies.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/of_device.h>
20#include <linux/platform_device.h>
21#include <linux/sched.h>
22#include <linux/slab.h>
23#include <linux/wait.h>
24
25#include <linux/platform_data/at91_adc.h>
26
27#include <linux/iio/iio.h>
28#include <linux/iio/buffer.h>
29#include <linux/iio/kfifo_buf.h>
30#include <linux/iio/trigger.h>
31#include <linux/iio/trigger_consumer.h>
32
33#include <mach/at91_adc.h>
34
/* Byte offset of channel ch's data register within the register block. */
#define AT91_ADC_CHAN(st, ch) \
	(st->registers->channel_base + (ch * 4))
/* Relaxed MMIO accessors for the memory-mapped ADC registers. */
#define at91_adc_readl(st, reg) \
	(readl_relaxed(st->reg_base + reg))
#define at91_adc_writel(st, reg, val) \
	(writel_relaxed(val, st->reg_base + reg))

/* Per-device driver state, stored as IIO private data. */
struct at91_adc_state {
	struct clk		*adc_clk;	/* ADC operating clock ("adc_op_clk") */
	u16			*buffer;	/* scan buffer, allocated while a trigger is enabled */
	unsigned long		channels_mask;	/* bitmask of usable channels */
	struct clk		*clk;		/* peripheral clock ("adc_clk") */
	bool			done;		/* set by the IRQ handler when a one-shot conversion finished */
	int			irq;
	bool			irq_enabled;	/* tracks IRQ state across the trigger bottom half */
	u16			last_value;	/* last converted value (direct mode) */
	struct mutex		lock;		/* serializes one-shot conversions in read_raw */
	u8			num_channels;
	void __iomem		*reg_base;
	struct at91_adc_reg_desc *registers;	/* SoC-specific register layout */
	u8			startup_time;	/* ADC startup time, in us (from DT/pdata) */
	struct iio_trigger	**trig;		/* allocated triggers; entries may be NULL */
	struct at91_adc_trigger	*trigger_list;
	u32			trigger_number;
	bool			use_external;	/* allow external hardware triggers */
	u32			vref_mv;	/* reference voltage, in mV */
	wait_queue_head_t	wq_data_avail;	/* read_raw sleeps here until 'done' */
};
63
/*
 * Pollfunc bottom half: runs after the trigger fired while buffered
 * capture is enabled.  Reads every active channel into st->buffer,
 * appends the timestamp, pushes the scan into the IIO buffer, then
 * ACKs DRDY and re-enables the IRQ that the top half disabled.
 */
static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *idev = pf->indio_dev;
	struct at91_adc_state *st = iio_priv(idev);
	struct iio_buffer *buffer = idev->buffer;
	int i, j = 0;

	/* Pack the converted values of the enabled channels back to back. */
	for (i = 0; i < idev->masklength; i++) {
		if (!test_bit(i, idev->active_scan_mask))
			continue;
		st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, i));
		j++;
	}

	if (idev->scan_timestamp) {
		/*
		 * NOTE(review): the byte offset is ALIGN(j, 8) with j
		 * counting 16-bit samples rather than bytes; with more
		 * than four enabled channels the timestamp would overlap
		 * sample data — verify against the core's scan_bytes
		 * layout.
		 */
		s64 *timestamp = (s64 *)((u8 *)st->buffer +
					ALIGN(j, sizeof(s64)));
		*timestamp = pf->timestamp;
	}

	buffer->access->store_to(buffer, (u8 *)st->buffer, pf->timestamp);

	iio_trigger_notify_done(idev->trig);
	st->irq_enabled = true;

	/* Needed to ACK the DRDY interruption */
	at91_adc_readl(st, AT91_ADC_LCDR);

	/* Balances the disable_irq_nosync() in at91_adc_eoc_trigger(). */
	enable_irq(st->irq);

	return IRQ_HANDLED;
}
97
/*
 * Hard IRQ handler for the ADC data-ready interrupt.  In buffered mode
 * it masks the IRQ and kicks the trigger poll (bottom half above); in
 * direct mode it latches the converted value and wakes the reader
 * sleeping in at91_adc_read_raw().
 */
static irqreturn_t at91_adc_eoc_trigger(int irq, void *private)
{
	struct iio_dev *idev = private;
	struct at91_adc_state *st = iio_priv(idev);
	u32 status = at91_adc_readl(st, st->registers->status_register);

	/* Not a data-ready event: nothing to do. */
	if (!(status & st->registers->drdy_mask))
		return IRQ_HANDLED;

	if (iio_buffer_enabled(idev)) {
		/* Re-enabled by at91_adc_trigger_handler() when done. */
		disable_irq_nosync(irq);
		st->irq_enabled = false;
		iio_trigger_poll(idev->trig, iio_get_time_ns());
	} else {
		st->last_value = at91_adc_readl(st, AT91_ADC_LCDR);
		st->done = true;
		wake_up_interruptible(&st->wq_data_avail);
	}

	return IRQ_HANDLED;
}
119
/*
 * Build the iio_chan_spec array from the channel mask: one indexed
 * voltage channel per set bit, plus a trailing timestamp channel.
 * Returns the channel count on success or -ENOMEM.
 */
static int at91_adc_channel_init(struct iio_dev *idev)
{
	struct at91_adc_state *st = iio_priv(idev);
	struct iio_chan_spec *chan_array, *timestamp;
	int bit, idx = 0;

	/* +1 accounts for the timestamp channel. */
	idev->num_channels = bitmap_weight(&st->channels_mask,
					   st->num_channels) + 1;

	/*
	 * NOTE(review): num_channels already includes the timestamp, so
	 * the "+ 1" below allocates one spare zeroed entry — harmless
	 * but probably unintended.
	 */
	chan_array = devm_kzalloc(&idev->dev,
				  ((idev->num_channels + 1) *
				   sizeof(struct iio_chan_spec)),
				  GFP_KERNEL);

	if (!chan_array)
		return -ENOMEM;

	for_each_set_bit(bit, &st->channels_mask, st->num_channels) {
		struct iio_chan_spec *chan = chan_array + idx;

		chan->type = IIO_VOLTAGE;
		chan->indexed = 1;
		chan->channel = bit;
		chan->scan_index = idx;
		/* 10-bit unsigned samples stored as 16-bit words. */
		chan->scan_type.sign = 'u';
		chan->scan_type.realbits = 10;
		chan->scan_type.storagebits = 16;
		chan->info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
			IIO_CHAN_INFO_RAW_SEPARATE_BIT;
		idx++;
	}
	timestamp = chan_array + idx;

	/* Timestamp channel goes last in the scan. */
	timestamp->type = IIO_TIMESTAMP;
	timestamp->channel = -1;
	timestamp->scan_index = idx;
	timestamp->scan_type.sign = 's';
	timestamp->scan_type.realbits = 64;
	timestamp->scan_type.storagebits = 64;

	idev->channels = chan_array;
	return idev->num_channels;
}
163
164static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
165 struct at91_adc_trigger *triggers,
166 const char *trigger_name)
167{
168 struct at91_adc_state *st = iio_priv(idev);
169 u8 value = 0;
170 int i;
171
172 for (i = 0; i < st->trigger_number; i++) {
173 char *name = kasprintf(GFP_KERNEL,
174 "%s-dev%d-%s",
175 idev->name,
176 idev->id,
177 triggers[i].name);
178 if (!name)
179 return -ENOMEM;
180
181 if (strcmp(trigger_name, name) == 0) {
182 value = triggers[i].value;
183 kfree(name);
184 break;
185 }
186
187 kfree(name);
188 }
189
190 return value;
191}
192
/*
 * Trigger state callback.  On enable: allocate the scan buffer, select
 * the hardware trigger source, enable every channel in the scan mask
 * and unmask the DRDY interrupt.  On disable: undo all of that and free
 * the scan buffer.
 */
static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
{
	struct iio_dev *idev = trig->private_data;
	struct at91_adc_state *st = iio_priv(idev);
	struct iio_buffer *buffer = idev->buffer;
	struct at91_adc_reg_desc *reg = st->registers;
	u32 status = at91_adc_readl(st, reg->trigger_register);
	u8 value;
	u8 bit;

	value = at91_adc_get_trigger_value_by_name(idev,
						   st->trigger_list,
						   idev->trig->name);
	/* 0 means the current trigger is not one of ours. */
	if (value == 0)
		return -EINVAL;

	if (state) {
		st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
		if (st->buffer == NULL)
			return -ENOMEM;

		/* Select the hardware trigger source. */
		at91_adc_writel(st, reg->trigger_register,
				status | value);

		/* Enable conversion on every channel in the scan mask. */
		for_each_set_bit(bit, buffer->scan_mask,
				 st->num_channels) {
			struct iio_chan_spec const *chan = idev->channels + bit;
			at91_adc_writel(st, AT91_ADC_CHER,
					AT91_ADC_CH(chan->channel));
		}

		at91_adc_writel(st, AT91_ADC_IER, reg->drdy_mask);

	} else {
		/* Mask the IRQ first so no handler races the teardown. */
		at91_adc_writel(st, AT91_ADC_IDR, reg->drdy_mask);

		at91_adc_writel(st, reg->trigger_register,
				status & ~value);

		for_each_set_bit(bit, buffer->scan_mask,
				 st->num_channels) {
			struct iio_chan_spec const *chan = idev->channels + bit;
			at91_adc_writel(st, AT91_ADC_CHDR,
					AT91_ADC_CH(chan->channel));
		}
		kfree(st->buffer);
	}

	return 0;
}
243
/* Trigger ops: only the enable/disable state change is needed. */
static const struct iio_trigger_ops at91_adc_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &at91_adc_configure_trigger,
};
248
249static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
250 struct at91_adc_trigger *trigger)
251{
252 struct iio_trigger *trig;
253 int ret;
254
255 trig = iio_trigger_alloc("%s-dev%d-%s", idev->name,
256 idev->id, trigger->name);
257 if (trig == NULL)
258 return NULL;
259
260 trig->dev.parent = idev->dev.parent;
261 trig->private_data = idev;
262 trig->ops = &at91_adc_trigger_ops;
263
264 ret = iio_trigger_register(trig);
265 if (ret)
266 return NULL;
267
268 return trig;
269}
270
271static int at91_adc_trigger_init(struct iio_dev *idev)
272{
273 struct at91_adc_state *st = iio_priv(idev);
274 int i, ret;
275
276 st->trig = devm_kzalloc(&idev->dev,
277 st->trigger_number * sizeof(st->trig),
278 GFP_KERNEL);
279
280 if (st->trig == NULL) {
281 ret = -ENOMEM;
282 goto error_ret;
283 }
284
285 for (i = 0; i < st->trigger_number; i++) {
286 if (st->trigger_list[i].is_external && !(st->use_external))
287 continue;
288
289 st->trig[i] = at91_adc_allocate_trigger(idev,
290 st->trigger_list + i);
291 if (st->trig[i] == NULL) {
292 dev_err(&idev->dev,
293 "Could not allocate trigger %d\n", i);
294 ret = -ENOMEM;
295 goto error_trigger;
296 }
297 }
298
299 return 0;
300
301error_trigger:
302 for (i--; i >= 0; i--) {
303 iio_trigger_unregister(st->trig[i]);
304 iio_trigger_free(st->trig[i]);
305 }
306error_ret:
307 return ret;
308}
309
310static void at91_adc_trigger_remove(struct iio_dev *idev)
311{
312 struct at91_adc_state *st = iio_priv(idev);
313 int i;
314
315 for (i = 0; i < st->trigger_number; i++) {
316 iio_trigger_unregister(st->trig[i]);
317 iio_trigger_free(st->trig[i]);
318 }
319}
320
/* Standard triggered-buffer lifecycle hooks from the IIO core. */
static const struct iio_buffer_setup_ops at91_adc_buffer_ops = {
	.preenable = &iio_sw_buffer_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};
326
/*
 * Set up buffered capture: allocate the kfifo, the trigger pollfunc
 * (timestamp in the top half, sample read-out in the bottom half) and
 * register the buffer with the channel layout.
 * Undone by at91_adc_buffer_remove().
 */
static int at91_adc_buffer_init(struct iio_dev *idev)
{
	int ret;

	idev->buffer = iio_kfifo_allocate(idev);
	if (!idev->buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}

	idev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
					    &at91_adc_trigger_handler,
					    IRQF_ONESHOT,
					    idev,
					    "%s-consumer%d",
					    idev->name,
					    idev->id);
	if (idev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_pollfunc;
	}

	idev->setup_ops = &at91_adc_buffer_ops;
	idev->modes |= INDIO_BUFFER_TRIGGERED;

	ret = iio_buffer_register(idev,
				  idev->channels,
				  idev->num_channels);
	if (ret)
		goto error_register;

	return 0;

	/* Unwind in reverse order of acquisition. */
error_register:
	iio_dealloc_pollfunc(idev->pollfunc);
error_pollfunc:
	iio_kfifo_free(idev->buffer);
error_ret:
	return ret;
}
367
/* Tear down what at91_adc_buffer_init() set up, in reverse order. */
static void at91_adc_buffer_remove(struct iio_dev *idev)
{
	iio_buffer_unregister(idev);
	iio_dealloc_pollfunc(idev->pollfunc);
	iio_kfifo_free(idev->buffer);
}
374
375static int at91_adc_read_raw(struct iio_dev *idev,
376 struct iio_chan_spec const *chan,
377 int *val, int *val2, long mask)
378{
379 struct at91_adc_state *st = iio_priv(idev);
380 int ret;
381
382 switch (mask) {
383 case IIO_CHAN_INFO_RAW:
384 mutex_lock(&st->lock);
385
386 at91_adc_writel(st, AT91_ADC_CHER,
387 AT91_ADC_CH(chan->channel));
388 at91_adc_writel(st, AT91_ADC_IER, st->registers->drdy_mask);
389 at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_START);
390
391 ret = wait_event_interruptible_timeout(st->wq_data_avail,
392 st->done,
393 msecs_to_jiffies(1000));
394 if (ret == 0)
395 return -ETIMEDOUT;
396 else if (ret < 0)
397 return ret;
398
399 *val = st->last_value;
400
401 at91_adc_writel(st, AT91_ADC_CHDR,
402 AT91_ADC_CH(chan->channel));
403 at91_adc_writel(st, AT91_ADC_IDR, st->registers->drdy_mask);
404
405 st->last_value = 0;
406 st->done = false;
407 mutex_unlock(&st->lock);
408 return IIO_VAL_INT;
409
410 case IIO_CHAN_INFO_SCALE:
411 *val = (st->vref_mv * 1000) >> chan->scan_type.realbits;
412 *val2 = 0;
413 return IIO_VAL_INT_PLUS_MICRO;
414 default:
415 break;
416 }
417 return -EINVAL;
418}
419
/*
 * Parse the "atmel,adc-*" devicetree properties and the per-trigger
 * child nodes into the driver state.  All properties read here are
 * mandatory except "atmel,adc-use-external-triggers" and the
 * per-trigger "trigger-external" flag.
 * Returns 0 or a negative errno.
 */
static int at91_adc_probe_dt(struct at91_adc_state *st,
			     struct platform_device *pdev)
{
	struct iio_dev *idev = iio_priv_to_dev(st);
	struct device_node *node = pdev->dev.of_node;
	struct device_node *trig_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	st->use_external = of_property_read_bool(node, "atmel,adc-use-external-triggers");

	if (of_property_read_u32(node, "atmel,adc-channels-used", &prop)) {
		dev_err(&idev->dev, "Missing adc-channels-used property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	st->channels_mask = prop;

	if (of_property_read_u32(node, "atmel,adc-num-channels", &prop)) {
		dev_err(&idev->dev, "Missing adc-num-channels property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	st->num_channels = prop;

	if (of_property_read_u32(node, "atmel,adc-startup-time", &prop)) {
		dev_err(&idev->dev, "Missing adc-startup-time property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	st->startup_time = prop;


	if (of_property_read_u32(node, "atmel,adc-vref", &prop)) {
		dev_err(&idev->dev, "Missing adc-vref property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	st->vref_mv = prop;

	/* SoC-specific register layout, also supplied via DT. */
	st->registers = devm_kzalloc(&idev->dev,
				     sizeof(struct at91_adc_reg_desc),
				     GFP_KERNEL);
	if (!st->registers) {
		dev_err(&idev->dev, "Could not allocate register memory.\n");
		ret = -ENOMEM;
		goto error_ret;
	}

	if (of_property_read_u32(node, "atmel,adc-channel-base", &prop)) {
		dev_err(&idev->dev, "Missing adc-channel-base property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	st->registers->channel_base = prop;

	if (of_property_read_u32(node, "atmel,adc-drdy-mask", &prop)) {
		dev_err(&idev->dev, "Missing adc-drdy-mask property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	st->registers->drdy_mask = prop;

	if (of_property_read_u32(node, "atmel,adc-status-register", &prop)) {
		dev_err(&idev->dev, "Missing adc-status-register property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	st->registers->status_register = prop;

	if (of_property_read_u32(node, "atmel,adc-trigger-register", &prop)) {
		dev_err(&idev->dev, "Missing adc-trigger-register property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	st->registers->trigger_register = prop;

	/* Each child node describes one hardware trigger. */
	st->trigger_number = of_get_child_count(node);
	st->trigger_list = devm_kzalloc(&idev->dev, st->trigger_number *
					sizeof(struct at91_adc_trigger),
					GFP_KERNEL);
	if (!st->trigger_list) {
		dev_err(&idev->dev, "Could not allocate trigger list memory.\n");
		ret = -ENOMEM;
		goto error_ret;
	}

	for_each_child_of_node(node, trig_node) {
		struct at91_adc_trigger *trig = st->trigger_list + i;
		const char *name;

		/*
		 * NOTE(review): jumping out of for_each_child_of_node()
		 * leaves trig_node's refcount elevated — an
		 * of_node_put() looks missing on these error paths;
		 * confirm against the OF iterator contract.
		 */
		if (of_property_read_string(trig_node, "trigger-name", &name)) {
			dev_err(&idev->dev, "Missing trigger-name property in the DT.\n");
			ret = -EINVAL;
			goto error_ret;
		}
		trig->name = name;

		if (of_property_read_u32(trig_node, "trigger-value", &prop)) {
			dev_err(&idev->dev, "Missing trigger-value property in the DT.\n");
			ret = -EINVAL;
			goto error_ret;
		}
		trig->value = prop;
		trig->is_external = of_property_read_bool(trig_node, "trigger-external");
		i++;
	}

	return 0;

error_ret:
	return ret;
}
536
/*
 * Non-DT probe path: copy the board-file platform data into the driver
 * state.  Returns -EINVAL when no platform data was provided.
 */
static int at91_adc_probe_pdata(struct at91_adc_state *st,
				struct platform_device *pdev)
{
	struct at91_adc_data *pdata = pdev->dev.platform_data;

	if (!pdata)
		return -EINVAL;

	st->use_external = pdata->use_external_triggers;
	st->vref_mv = pdata->vref;
	st->channels_mask = pdata->channels_used;
	st->num_channels = pdata->num_channels;
	st->startup_time = pdata->startup_time;
	st->trigger_number = pdata->trigger_number;
	st->trigger_list = pdata->trigger_list;
	st->registers = pdata->registers;

	return 0;
}
556
/* Direct-mode operations; buffered capture goes through the trigger. */
static const struct iio_info at91_adc_info = {
	.driver_module = THIS_MODULE,
	.read_raw = &at91_adc_read_raw,
};
561
562static int __devinit at91_adc_probe(struct platform_device *pdev)
563{
564 unsigned int prsc, mstrclk, ticks, adc_clk;
565 int ret;
566 struct iio_dev *idev;
567 struct at91_adc_state *st;
568 struct resource *res;
569
570 idev = iio_device_alloc(sizeof(struct at91_adc_state));
571 if (idev == NULL) {
572 ret = -ENOMEM;
573 goto error_ret;
574 }
575
576 st = iio_priv(idev);
577
578 if (pdev->dev.of_node)
579 ret = at91_adc_probe_dt(st, pdev);
580 else
581 ret = at91_adc_probe_pdata(st, pdev);
582
583 if (ret) {
584 dev_err(&pdev->dev, "No platform data available.\n");
585 ret = -EINVAL;
586 goto error_free_device;
587 }
588
589 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
590 if (!res) {
591 dev_err(&pdev->dev, "No resource defined\n");
592 ret = -ENXIO;
593 goto error_ret;
594 }
595
596 platform_set_drvdata(pdev, idev);
597
598 idev->dev.parent = &pdev->dev;
599 idev->name = dev_name(&pdev->dev);
600 idev->modes = INDIO_DIRECT_MODE;
601 idev->info = &at91_adc_info;
602
603 st->irq = platform_get_irq(pdev, 0);
604 if (st->irq < 0) {
605 dev_err(&pdev->dev, "No IRQ ID is designated\n");
606 ret = -ENODEV;
607 goto error_free_device;
608 }
609
610 if (!request_mem_region(res->start, resource_size(res),
611 "AT91 adc registers")) {
612 dev_err(&pdev->dev, "Resources are unavailable.\n");
613 ret = -EBUSY;
614 goto error_free_device;
615 }
616
617 st->reg_base = ioremap(res->start, resource_size(res));
618 if (!st->reg_base) {
619 dev_err(&pdev->dev, "Failed to map registers.\n");
620 ret = -ENOMEM;
621 goto error_release_mem;
622 }
623
624 /*
625 * Disable all IRQs before setting up the handler
626 */
627 at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_SWRST);
628 at91_adc_writel(st, AT91_ADC_IDR, 0xFFFFFFFF);
629 ret = request_irq(st->irq,
630 at91_adc_eoc_trigger,
631 0,
632 pdev->dev.driver->name,
633 idev);
634 if (ret) {
635 dev_err(&pdev->dev, "Failed to allocate IRQ.\n");
636 goto error_unmap_reg;
637 }
638
639 st->clk = clk_get(&pdev->dev, "adc_clk");
640 if (IS_ERR(st->clk)) {
641 dev_err(&pdev->dev, "Failed to get the clock.\n");
642 ret = PTR_ERR(st->clk);
643 goto error_free_irq;
644 }
645
646 ret = clk_prepare(st->clk);
647 if (ret) {
648 dev_err(&pdev->dev, "Could not prepare the clock.\n");
649 goto error_free_clk;
650 }
651
652 ret = clk_enable(st->clk);
653 if (ret) {
654 dev_err(&pdev->dev, "Could not enable the clock.\n");
655 goto error_unprepare_clk;
656 }
657
658 st->adc_clk = clk_get(&pdev->dev, "adc_op_clk");
659 if (IS_ERR(st->adc_clk)) {
660 dev_err(&pdev->dev, "Failed to get the ADC clock.\n");
661 ret = PTR_ERR(st->clk);
662 goto error_disable_clk;
663 }
664
665 ret = clk_prepare(st->adc_clk);
666 if (ret) {
667 dev_err(&pdev->dev, "Could not prepare the ADC clock.\n");
668 goto error_free_adc_clk;
669 }
670
671 ret = clk_enable(st->adc_clk);
672 if (ret) {
673 dev_err(&pdev->dev, "Could not enable the ADC clock.\n");
674 goto error_unprepare_adc_clk;
675 }
676
677 /*
678 * Prescaler rate computation using the formula from the Atmel's
679 * datasheet : ADC Clock = MCK / ((Prescaler + 1) * 2), ADC Clock being
680 * specified by the electrical characteristics of the board.
681 */
682 mstrclk = clk_get_rate(st->clk);
683 adc_clk = clk_get_rate(st->adc_clk);
684 prsc = (mstrclk / (2 * adc_clk)) - 1;
685
686 if (!st->startup_time) {
687 dev_err(&pdev->dev, "No startup time available.\n");
688 ret = -EINVAL;
689 goto error_disable_adc_clk;
690 }
691
692 /*
693 * Number of ticks needed to cover the startup time of the ADC as
694 * defined in the electrical characteristics of the board, divided by 8.
695 * The formula thus is : Startup Time = (ticks + 1) * 8 / ADC Clock
696 */
697 ticks = round_up((st->startup_time * adc_clk /
698 1000000) - 1, 8) / 8;
699 at91_adc_writel(st, AT91_ADC_MR,
700 (AT91_ADC_PRESCAL_(prsc) & AT91_ADC_PRESCAL) |
701 (AT91_ADC_STARTUP_(ticks) & AT91_ADC_STARTUP));
702
703 /* Setup the ADC channels available on the board */
704 ret = at91_adc_channel_init(idev);
705 if (ret < 0) {
706 dev_err(&pdev->dev, "Couldn't initialize the channels.\n");
707 goto error_disable_adc_clk;
708 }
709
710 init_waitqueue_head(&st->wq_data_avail);
711 mutex_init(&st->lock);
712
713 ret = at91_adc_buffer_init(idev);
714 if (ret < 0) {
715 dev_err(&pdev->dev, "Couldn't initialize the buffer.\n");
716 goto error_disable_adc_clk;
717 }
718
719 ret = at91_adc_trigger_init(idev);
720 if (ret < 0) {
721 dev_err(&pdev->dev, "Couldn't setup the triggers.\n");
722 goto error_unregister_buffer;
723 }
724
725 ret = iio_device_register(idev);
726 if (ret < 0) {
727 dev_err(&pdev->dev, "Couldn't register the device.\n");
728 goto error_remove_triggers;
729 }
730
731 return 0;
732
733error_remove_triggers:
734 at91_adc_trigger_remove(idev);
735error_unregister_buffer:
736 at91_adc_buffer_remove(idev);
737error_disable_adc_clk:
738 clk_disable(st->adc_clk);
739error_unprepare_adc_clk:
740 clk_unprepare(st->adc_clk);
741error_free_adc_clk:
742 clk_put(st->adc_clk);
743error_disable_clk:
744 clk_disable(st->clk);
745error_unprepare_clk:
746 clk_unprepare(st->clk);
747error_free_clk:
748 clk_put(st->clk);
749error_free_irq:
750 free_irq(st->irq, idev);
751error_unmap_reg:
752 iounmap(st->reg_base);
753error_release_mem:
754 release_mem_region(res->start, resource_size(res));
755error_free_device:
756 iio_device_free(idev);
757error_ret:
758 return ret;
759}
760
/* Undo everything done in at91_adc_probe(), in reverse order. */
static int __devexit at91_adc_remove(struct platform_device *pdev)
{
	struct iio_dev *idev = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct at91_adc_state *st = iio_priv(idev);

	iio_device_unregister(idev);
	at91_adc_trigger_remove(idev);
	at91_adc_buffer_remove(idev);
	clk_disable_unprepare(st->adc_clk);
	clk_put(st->adc_clk);
	clk_disable(st->clk);
	clk_unprepare(st->clk);
	clk_put(st->clk);
	free_irq(st->irq, idev);
	iounmap(st->reg_base);
	release_mem_region(res->start, resource_size(res));
	iio_device_free(idev);

	return 0;
}
782
/* Devicetree match table. */
static const struct of_device_id at91_adc_dt_ids[] = {
	{ .compatible = "atmel,at91sam9260-adc" },
	{},
};
MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);

static struct platform_driver at91_adc_driver = {
	.probe = at91_adc_probe,
	.remove = __devexit_p(at91_adc_remove),
	.driver = {
		   .name = "at91_adc",
		   .of_match_table = of_match_ptr(at91_adc_dt_ids),
		   },
};

module_platform_driver(at91_adc_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atmel AT91 ADC Driver");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
diff --git a/drivers/iio/amplifiers/Kconfig b/drivers/iio/amplifiers/Kconfig
new file mode 100644
index 000000000000..05d707ed7d4f
--- /dev/null
+++ b/drivers/iio/amplifiers/Kconfig
@@ -0,0 +1,17 @@
1#
2# Gain Amplifiers, etc.
3#
4menu "Amplifiers"
5
6config AD8366
7 tristate "Analog Devices AD8366 VGA"
8 depends on SPI
9 select BITREVERSE
10 help
11 Say yes here to build support for Analog Devices AD8366
12 SPI Dual-Digital Variable Gain Amplifier (VGA).
13
14 To compile this driver as a module, choose M here: the
15 module will be called ad8366.
16
17endmenu
diff --git a/drivers/iio/amplifiers/Makefile b/drivers/iio/amplifiers/Makefile
new file mode 100644
index 000000000000..a6ca366908e0
--- /dev/null
+++ b/drivers/iio/amplifiers/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile iio/amplifiers
3#
4
5obj-$(CONFIG_AD8366) += ad8366.o
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
new file mode 100644
index 000000000000..d8281cdbfc4a
--- /dev/null
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -0,0 +1,222 @@
1/*
2 * AD8366 SPI Dual-Digital Variable Gain Amplifier (VGA)
3 *
4 * Copyright 2012 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2.
7 */
8
9#include <linux/device.h>
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/sysfs.h>
13#include <linux/spi/spi.h>
14#include <linux/regulator/consumer.h>
15#include <linux/err.h>
16#include <linux/module.h>
17#include <linux/bitrev.h>
18
19#include <linux/iio/iio.h>
20#include <linux/iio/sysfs.h>
21
/* Per-device driver state, stored as IIO private data. */
struct ad8366_state {
	struct spi_device	*spi;
	struct regulator	*reg;	/* optional "vcc" supply; may hold ERR_PTR */
	unsigned char		ch[2];	/* cached 6-bit gain code per channel */
	/*
	 * DMA (thus cache coherency maintenance) requires the
	 * transfer buffers to live in their own cache lines.
	 */
	unsigned char		data[2] ____cacheline_aligned;
};
32
33static int ad8366_write(struct iio_dev *indio_dev,
34 unsigned char ch_a, char unsigned ch_b)
35{
36 struct ad8366_state *st = iio_priv(indio_dev);
37 int ret;
38
39 ch_a = bitrev8(ch_a & 0x3F);
40 ch_b = bitrev8(ch_b & 0x3F);
41
42 st->data[0] = ch_b >> 4;
43 st->data[1] = (ch_b << 4) | (ch_a >> 2);
44
45 ret = spi_write(st->spi, st->data, ARRAY_SIZE(st->data));
46 if (ret < 0)
47 dev_err(&indio_dev->dev, "write failed (%d)", ret);
48
49 return ret;
50}
51
52static int ad8366_read_raw(struct iio_dev *indio_dev,
53 struct iio_chan_spec const *chan,
54 int *val,
55 int *val2,
56 long m)
57{
58 struct ad8366_state *st = iio_priv(indio_dev);
59 int ret;
60 unsigned code;
61
62 mutex_lock(&indio_dev->mlock);
63 switch (m) {
64 case IIO_CHAN_INFO_HARDWAREGAIN:
65 code = st->ch[chan->channel];
66
67 /* Values in dB */
68 code = code * 253 + 4500;
69 *val = code / 1000;
70 *val2 = (code % 1000) * 1000;
71
72 ret = IIO_VAL_INT_PLUS_MICRO_DB;
73 break;
74 default:
75 ret = -EINVAL;
76 }
77 mutex_unlock(&indio_dev->mlock);
78
79 return ret;
80};
81
/*
 * Set the hardware gain of one channel.  val/val2 carry dB as integer
 * plus microunits; accepted range is 4.500 to 20.500 dB in 0.253 dB
 * steps.  Both channel codes are rewritten on every update since the
 * device takes them in a single transfer.
 */
static int ad8366_write_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int val,
			    int val2,
			    long mask)
{
	struct ad8366_state *st = iio_priv(indio_dev);
	unsigned code;
	int ret;

	if (val < 0 || val2 < 0)
		return -EINVAL;

	/* Values in dB */
	code = (((u8)val * 1000) + ((u32)val2 / 1000));

	if (code > 20500 || code < 4500)
		return -EINVAL;

	/* Map milli-dB above the 4.5 dB floor onto the 6-bit code. */
	code = (code - 4500) / 253;

	mutex_lock(&indio_dev->mlock);
	switch (mask) {
	case IIO_CHAN_INFO_HARDWAREGAIN:
		st->ch[chan->channel] = code;
		ret = ad8366_write(indio_dev, st->ch[0], st->ch[1]);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret;
}
116
static const struct iio_info ad8366_info = {
	.read_raw = &ad8366_read_raw,
	.write_raw = &ad8366_write_raw,
	.driver_module = THIS_MODULE,
};

/* One indexed output voltage channel with per-channel gain control. */
#define AD8366_CHAN(_channel) { \
	.type = IIO_VOLTAGE, \
	.output = 1, \
	.indexed = 1, \
	.channel = _channel, \
	.info_mask = IIO_CHAN_INFO_HARDWAREGAIN_SEPARATE_BIT,\
}

static const struct iio_chan_spec ad8366_channels[] = {
	AD8366_CHAN(0),
	AD8366_CHAN(1),
};
135
/*
 * Probe: allocate the IIO device, enable the (optional) vcc regulator,
 * register the device and program an initial gain of 0 on both
 * channels.
 */
static int __devinit ad8366_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct ad8366_state *st;
	int ret;

	indio_dev = iio_device_alloc(sizeof(*st));
	if (indio_dev == NULL)
		return -ENOMEM;

	st = iio_priv(indio_dev);

	/* The supply is optional: an ERR_PTR here is tolerated throughout. */
	st->reg = regulator_get(&spi->dev, "vcc");
	if (!IS_ERR(st->reg)) {
		ret = regulator_enable(st->reg);
		if (ret)
			goto error_put_reg;
	}

	spi_set_drvdata(spi, indio_dev);
	st->spi = spi;

	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->info = &ad8366_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = ad8366_channels;
	indio_dev->num_channels = ARRAY_SIZE(ad8366_channels);

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_disable_reg;

	/* Start from a known state: both channels at minimum gain code. */
	ad8366_write(indio_dev, 0 , 0);

	return 0;

error_disable_reg:
	if (!IS_ERR(st->reg))
		regulator_disable(st->reg);
error_put_reg:
	if (!IS_ERR(st->reg))
		regulator_put(st->reg);

	iio_device_free(indio_dev);

	return ret;
}
184
/* Undo ad8366_probe(): unregister, release the regulator, free. */
static int __devexit ad8366_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad8366_state *st = iio_priv(indio_dev);
	struct regulator *reg = st->reg;

	iio_device_unregister(indio_dev);

	/* The regulator was optional; only release it if we got one. */
	if (!IS_ERR(reg)) {
		regulator_disable(reg);
		regulator_put(reg);
	}

	iio_device_free(indio_dev);

	return 0;
}
202
/* SPI device id table: single supported part. */
static const struct spi_device_id ad8366_id[] = {
	{"ad8366", 0},
	{}
};

static struct spi_driver ad8366_driver = {
	.driver = {
		.name	= KBUILD_MODNAME,
		.owner	= THIS_MODULE,
	},
	.probe		= ad8366_probe,
	.remove		= __devexit_p(ad8366_remove),
	.id_table	= ad8366_id,
};

module_spi_driver(ad8366_driver);

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD8366 VGA");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
new file mode 100644
index 000000000000..f652e6ae5a35
--- /dev/null
+++ b/drivers/iio/iio_core.h
@@ -0,0 +1,62 @@
/* The industrial I/O core function defs.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * These definitions are meant for use only within the IIO core, not individual
 * drivers.
 */

#ifndef _IIO_CORE_H_
#define _IIO_CORE_H_
#include <linux/kernel.h>
#include <linux/device.h>

struct iio_chan_spec;
struct iio_dev;


/* Build a sysfs device attribute for @chan whose name ends in @postfix,
 * wire up the show/store callbacks, and append it to @attr_list.
 * NOTE(review): exact naming behavior of @mask/@generic is defined by the
 * implementation in the core - confirm there before relying on it. */
int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*func)(struct device *dev,
					   struct device_attribute *attr,
					   char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   bool generic,
			   struct device *dev,
			   struct list_head *attr_list);

/* Event interface flags */
#define IIO_BUSY_BIT_POS 1

#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;

/* chrdev poll/read entry points used when buffer support is built in. */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps);


#define iio_buffer_poll_addr (&iio_buffer_poll)
#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)

#else

/* Without buffer support the file_operations slots are simply NULL. */
#define iio_buffer_poll_addr NULL
#define iio_buffer_read_first_n_outer_addr NULL

#endif

/* Event chrdev setup/teardown and fd allocation for a device. */
int iio_device_register_eventset(struct iio_dev *indio_dev);
void iio_device_unregister_eventset(struct iio_dev *indio_dev);
int iio_event_getfd(struct iio_dev *indio_dev);

#endif
diff --git a/drivers/iio/iio_core_trigger.h b/drivers/iio/iio_core_trigger.h
new file mode 100644
index 000000000000..6f7c56fcbe78
--- /dev/null
+++ b/drivers/iio/iio_core_trigger.h
@@ -0,0 +1,46 @@

/* The industrial I/O core, trigger consumer handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

struct iio_dev;

#ifdef CONFIG_IIO_TRIGGER
/**
 * iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
 * @indio_dev: iio_dev associated with the device that will consume the trigger
 **/
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev);

/**
 * iio_device_unregister_trigger_consumer() - reverse the registration process
 * @indio_dev: iio_dev associated with the device that consumed the trigger
 **/
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev);

#else

/*
 * Stubs used when trigger support is compiled out.  These must be
 * "static inline": a plain "static" definition in a header creates a
 * separate, unused copy (and -Wunused-function warnings) in every
 * translation unit including this file.  Stray semicolons after the
 * bodies and the mismatched #endif comment are also fixed.
 */
static inline int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	return 0;
}

static inline void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
}

#endif /* CONFIG_IIO_TRIGGER */
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
new file mode 100644
index 000000000000..ac185b8694bd
--- /dev/null
+++ b/drivers/iio/industrialio-buffer.c
@@ -0,0 +1,755 @@
1/* The industrial I/O core
2 *
3 * Copyright (c) 2008 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * Handling of buffer allocation / resizing.
10 *
11 *
12 * Things to look at here.
13 * - Better memory allocation techniques?
14 * - Alternative access techniques?
15 */
16#include <linux/kernel.h>
17#include <linux/export.h>
18#include <linux/device.h>
19#include <linux/fs.h>
20#include <linux/cdev.h>
21#include <linux/slab.h>
22#include <linux/poll.h>
23
24#include <linux/iio/iio.h>
25#include "iio_core.h"
26#include <linux/iio/sysfs.h>
27#include <linux/iio/buffer.h>
28
/* String prefixes for the endianness field of the scan element "type"
 * sysfs attribute (e.g. "be:s16/16>>0"). */
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
33
34/**
35 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
36 *
37 * This function relies on all buffer implementations having an
38 * iio_buffer as their first element.
39 **/
40ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
41 size_t n, loff_t *f_ps)
42{
43 struct iio_dev *indio_dev = filp->private_data;
44 struct iio_buffer *rb = indio_dev->buffer;
45
46 if (!rb || !rb->access->read_first_n)
47 return -EINVAL;
48 return rb->access->read_first_n(rb, n, buf);
49}
50
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	/* NOTE(review): unlike the read path there is no NULL check on rb;
	 * presumably poll is only reachable once a buffer exists - confirm. */
	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
66
/* Initialise the core-managed parts of an iio_buffer: the demux
 * instruction list and the waitqueue that poll/read sleep on. */
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
73
74static ssize_t iio_show_scan_index(struct device *dev,
75 struct device_attribute *attr,
76 char *buf)
77{
78 return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
79}
80
81static ssize_t iio_show_fixed_type(struct device *dev,
82 struct device_attribute *attr,
83 char *buf)
84{
85 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
86 u8 type = this_attr->c->scan_type.endianness;
87
88 if (type == IIO_CPU) {
89#ifdef __LITTLE_ENDIAN
90 type = IIO_LE;
91#else
92 type = IIO_BE;
93#endif
94 }
95 return sprintf(buf, "%s:%c%d/%d>>%u\n",
96 iio_endian_prefix[type],
97 this_attr->c->scan_type.sign,
98 this_attr->c->scan_type.realbits,
99 this_attr->c->scan_type.storagebits,
100 this_attr->c->scan_type.shift);
101}
102
103static ssize_t iio_scan_el_show(struct device *dev,
104 struct device_attribute *attr,
105 char *buf)
106{
107 int ret;
108 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
109
110 ret = test_bit(to_iio_dev_attr(attr)->address,
111 indio_dev->buffer->scan_mask);
112
113 return sprintf(buf, "%d\n", ret);
114}
115
/* Clear one bit in the buffer's scan mask; always succeeds. */
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
121
/* sysfs store for a scan element "en" attribute: enable or disable one
 * channel in the scan mask.  Rejected with -EBUSY while the buffer is
 * enabled so the mask cannot change under a running capture. */
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	/* Query current state; only act when it actually changes. */
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;

}
160
161static ssize_t iio_scan_el_ts_show(struct device *dev,
162 struct device_attribute *attr,
163 char *buf)
164{
165 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
166 return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
167}
168
/* sysfs store for the timestamp "en" attribute.  The timestamp is kept
 * in its own flag (mirrored on both buffer and device) rather than the
 * scan mask.  Rejected with -EBUSY while the buffer is enabled. */
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
	indio_dev->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	/* ret is 0 on success here, so this returns len or the error. */
	return ret ? ret : len;
}
194
195static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
196 const struct iio_chan_spec *chan)
197{
198 int ret, attrcount = 0;
199 struct iio_buffer *buffer = indio_dev->buffer;
200
201 ret = __iio_add_chan_devattr("index",
202 chan,
203 &iio_show_scan_index,
204 NULL,
205 0,
206 0,
207 &indio_dev->dev,
208 &buffer->scan_el_dev_attr_list);
209 if (ret)
210 goto error_ret;
211 attrcount++;
212 ret = __iio_add_chan_devattr("type",
213 chan,
214 &iio_show_fixed_type,
215 NULL,
216 0,
217 0,
218 &indio_dev->dev,
219 &buffer->scan_el_dev_attr_list);
220 if (ret)
221 goto error_ret;
222 attrcount++;
223 if (chan->type != IIO_TIMESTAMP)
224 ret = __iio_add_chan_devattr("en",
225 chan,
226 &iio_scan_el_show,
227 &iio_scan_el_store,
228 chan->scan_index,
229 0,
230 &indio_dev->dev,
231 &buffer->scan_el_dev_attr_list);
232 else
233 ret = __iio_add_chan_devattr("en",
234 chan,
235 &iio_scan_el_ts_show,
236 &iio_scan_el_ts_store,
237 chan->scan_index,
238 0,
239 &indio_dev->dev,
240 &buffer->scan_el_dev_attr_list);
241 attrcount++;
242 ret = attrcount;
243error_ret:
244 return ret;
245}
246
/* Free one scan element attribute: the kstrdup'd name and the wrapper.
 * @indio_dev is unused here but kept for interface symmetry. */
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
253
254static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
255{
256 struct iio_dev_attr *p, *n;
257 struct iio_buffer *buffer = indio_dev->buffer;
258
259 list_for_each_entry_safe(p, n,
260 &buffer->scan_el_dev_attr_list, l)
261 iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
262}
263
/* Name of the sysfs group holding the per-channel scan element attrs. */
static const char * const iio_scan_elements_group_name = "scan_elements";
265
266int iio_buffer_register(struct iio_dev *indio_dev,
267 const struct iio_chan_spec *channels,
268 int num_channels)
269{
270 struct iio_dev_attr *p;
271 struct attribute **attr;
272 struct iio_buffer *buffer = indio_dev->buffer;
273 int ret, i, attrn, attrcount, attrcount_orig = 0;
274
275 if (buffer->attrs)
276 indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
277
278 if (buffer->scan_el_attrs != NULL) {
279 attr = buffer->scan_el_attrs->attrs;
280 while (*attr++ != NULL)
281 attrcount_orig++;
282 }
283 attrcount = attrcount_orig;
284 INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
285 if (channels) {
286 /* new magic */
287 for (i = 0; i < num_channels; i++) {
288 /* Establish necessary mask length */
289 if (channels[i].scan_index >
290 (int)indio_dev->masklength - 1)
291 indio_dev->masklength
292 = indio_dev->channels[i].scan_index + 1;
293
294 ret = iio_buffer_add_channel_sysfs(indio_dev,
295 &channels[i]);
296 if (ret < 0)
297 goto error_cleanup_dynamic;
298 attrcount += ret;
299 if (channels[i].type == IIO_TIMESTAMP)
300 indio_dev->scan_index_timestamp =
301 channels[i].scan_index;
302 }
303 if (indio_dev->masklength && buffer->scan_mask == NULL) {
304 buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
305 sizeof(*buffer->scan_mask),
306 GFP_KERNEL);
307 if (buffer->scan_mask == NULL) {
308 ret = -ENOMEM;
309 goto error_cleanup_dynamic;
310 }
311 }
312 }
313
314 buffer->scan_el_group.name = iio_scan_elements_group_name;
315
316 buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
317 sizeof(buffer->scan_el_group.attrs[0]),
318 GFP_KERNEL);
319 if (buffer->scan_el_group.attrs == NULL) {
320 ret = -ENOMEM;
321 goto error_free_scan_mask;
322 }
323 if (buffer->scan_el_attrs)
324 memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
325 sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
326 attrn = attrcount_orig;
327
328 list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
329 buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
330 indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
331
332 return 0;
333
334error_free_scan_mask:
335 kfree(buffer->scan_mask);
336error_cleanup_dynamic:
337 __iio_buffer_attr_cleanup(indio_dev);
338
339 return ret;
340}
341EXPORT_SYMBOL(iio_buffer_register);
342
/* Undo iio_buffer_register(): free the scan mask, the attribute pointer
 * array, and every dynamically created scan element attribute. */
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);
350
351ssize_t iio_buffer_read_length(struct device *dev,
352 struct device_attribute *attr,
353 char *buf)
354{
355 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
356 struct iio_buffer *buffer = indio_dev->buffer;
357
358 if (buffer->access->get_length)
359 return sprintf(buf, "%d\n",
360 buffer->access->get_length(buffer));
361
362 return 0;
363}
364EXPORT_SYMBOL(iio_buffer_read_length);
365
366ssize_t iio_buffer_write_length(struct device *dev,
367 struct device_attribute *attr,
368 const char *buf,
369 size_t len)
370{
371 int ret;
372 ulong val;
373 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
374 struct iio_buffer *buffer = indio_dev->buffer;
375
376 ret = strict_strtoul(buf, 10, &val);
377 if (ret)
378 return ret;
379
380 if (buffer->access->get_length)
381 if (val == buffer->access->get_length(buffer))
382 return len;
383
384 mutex_lock(&indio_dev->mlock);
385 if (iio_buffer_enabled(indio_dev)) {
386 ret = -EBUSY;
387 } else {
388 if (buffer->access->set_length)
389 buffer->access->set_length(buffer, val);
390 ret = 0;
391 }
392 mutex_unlock(&indio_dev->mlock);
393
394 return ret ? ret : len;
395}
396EXPORT_SYMBOL(iio_buffer_write_length);
397
/* sysfs store for the buffer "enable" attribute.  Drives the device
 * between INDIO_DIRECT_MODE and a buffered mode, calling the driver's
 * setup_ops hooks in order: preenable, request_update, postenable on
 * the way up; predisable, postdisable on the way down. */
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	mutex_lock(&indio_dev->mlock);
	previous_mode = indio_dev->currentmode;
	/* Any first character other than '0' counts as "enable". */
	requested_state = !(buf[0] == '0');
	current_state = iio_buffer_enabled(indio_dev);
	/* Re-requesting the current state is a successful no-op. */
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-buffer, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (indio_dev->setup_ops->preenable) {
			ret = indio_dev->setup_ops->preenable(indio_dev);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started:"
				       "buffer preenable failed\n");
				goto error_ret;
			}
		}
		/* Let the buffer implementation resize/reallocate. */
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "buffer parameter update failed\n");
				goto error_ret;
			}
		}
		/* Definitely possible for devices to support both of these.*/
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
			if (!indio_dev->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				goto error_ret;
			}
			indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
			indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (indio_dev->setup_ops->postenable) {
			ret = indio_dev->setup_ops->postenable(indio_dev);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "postenable failed\n");
				/* Roll back the mode change and give the
				 * driver a chance to undo preenable. */
				indio_dev->currentmode = previous_mode;
				if (indio_dev->setup_ops->postdisable)
					indio_dev->setup_ops->
						postdisable(indio_dev);
				goto error_ret;
			}
		}
	} else {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&indio_dev->mlock);
	return len;

error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
487
488ssize_t iio_buffer_show_enable(struct device *dev,
489 struct device_attribute *attr,
490 char *buf)
491{
492 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
493 return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
494}
495EXPORT_SYMBOL(iio_buffer_show_enable);
496
/* note NULL used as error indicator as it doesn't make sense. */
/* Find the first mask in @av_masks that is a superset of @mask.
 * @av_masks is a concatenation of bitmaps, each BITS_TO_LONGS(masklength)
 * longs wide; iteration stops at the first all-zero word, so the array
 * must be terminated by a zero mask. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
511
/* Compute the byte size of one demuxed scan: each enabled channel's
 * storage bytes, naturally aligned to its own size, with the timestamp
 * (if enabled) appended last under the same alignment rule. */
static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
536
/* Generic preenable helper for software buffers: size the scan, pick the
 * active scan mask (matching against available_scan_masks when the device
 * has fixed masks), rebuild the demux table, and let the driver react. */
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	/* How much space will the demuxed element take? */
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);
	buffer->access->set_bytes_per_datum(buffer, indio_dev->scan_bytes);

	/* What scan mask do we actually have ?*/
	if (indio_dev->available_scan_masks)
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    buffer->scan_mask);
	else
		indio_dev->active_scan_mask = buffer->scan_mask;
	iio_update_demux(indio_dev);

	if (indio_dev->info->update_scan_mode)
		return indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
565
566/**
567 * iio_scan_mask_set() - set particular bit in the scan mask
568 * @buffer: the buffer whose scan mask we are interested in
569 * @bit: the bit to be set.
570 **/
571int iio_scan_mask_set(struct iio_dev *indio_dev,
572 struct iio_buffer *buffer, int bit)
573{
574 const unsigned long *mask;
575 unsigned long *trialmask;
576
577 trialmask = kmalloc(sizeof(*trialmask)*
578 BITS_TO_LONGS(indio_dev->masklength),
579 GFP_KERNEL);
580
581 if (trialmask == NULL)
582 return -ENOMEM;
583 if (!indio_dev->masklength) {
584 WARN_ON("trying to set scanmask prior to registering buffer\n");
585 kfree(trialmask);
586 return -EINVAL;
587 }
588 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
589 set_bit(bit, trialmask);
590
591 if (indio_dev->available_scan_masks) {
592 mask = iio_scan_mask_match(indio_dev->available_scan_masks,
593 indio_dev->masklength,
594 trialmask);
595 if (!mask) {
596 kfree(trialmask);
597 return -EINVAL;
598 }
599 }
600 bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
601
602 kfree(trialmask);
603
604 return 0;
605};
606EXPORT_SYMBOL_GPL(iio_scan_mask_set);
607
608int iio_scan_mask_query(struct iio_dev *indio_dev,
609 struct iio_buffer *buffer, int bit)
610{
611 if (bit > indio_dev->masklength)
612 return -EINVAL;
613
614 if (!buffer->scan_mask)
615 return 0;
616
617 return test_bit(bit, buffer->scan_mask);
618};
619EXPORT_SYMBOL_GPL(iio_scan_mask_query);
620
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
634
635static unsigned char *iio_demux(struct iio_buffer *buffer,
636 unsigned char *datain)
637{
638 struct iio_demux_table *t;
639
640 if (list_empty(&buffer->demux_list))
641 return datain;
642 list_for_each_entry(t, &buffer->demux_list, l)
643 memcpy(buffer->demux_bounce + t->to,
644 datain + t->from, t->length);
645
646 return buffer->demux_bounce;
647}
648
649int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
650 s64 timestamp)
651{
652 unsigned char *dataout = iio_demux(buffer, data);
653
654 return buffer->access->store_to(buffer, dataout, timestamp);
655}
656EXPORT_SYMBOL_GPL(iio_push_to_buffer);
657
658static void iio_buffer_demux_free(struct iio_buffer *buffer)
659{
660 struct iio_demux_table *p, *q;
661 list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
662 list_del(&p->l);
663 kfree(p);
664 }
665}
666
/* Rebuild the demux table mapping the device's active scan layout onto
 * the layout the buffer consumer asked for, plus the bounce buffer the
 * copies land in.  A no-op when the two masks are identical. */
int iio_update_demux(struct iio_dev *indio_dev)
{
	const struct iio_chan_spec *ch;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		/* Skip over source channels the consumer does not want,
		 * advancing in_loc past their (aligned) storage.
		 * NOTE(review): both bit searches walk active_scan_mask;
		 * presumably out_ind should iterate the consumer's mask -
		 * confirm against later upstream demux fixes. */
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		/* Align both cursors to the channel size before copying. */
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* out_loc now equals the total demuxed scan size. */
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
new file mode 100644
index 000000000000..1ddd8861c71b
--- /dev/null
+++ b/drivers/iio/industrialio-core.c
@@ -0,0 +1,913 @@
1/* The industrial I/O core
2 *
3 * Copyright (c) 2008 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * Based on elements of hwmon and input subsystems.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/idr.h>
15#include <linux/kdev_t.h>
16#include <linux/err.h>
17#include <linux/device.h>
18#include <linux/fs.h>
19#include <linux/poll.h>
20#include <linux/sched.h>
21#include <linux/wait.h>
22#include <linux/cdev.h>
23#include <linux/slab.h>
24#include <linux/anon_inodes.h>
25#include <linux/debugfs.h>
26#include <linux/iio/iio.h>
27#include "iio_core.h"
28#include "iio_core_trigger.h"
29#include <linux/iio/sysfs.h>
30#include <linux/iio/events.h>
31
/* IDA to assign each registered device a unique id*/
static DEFINE_IDA(iio_ida);

/* First dev_t in the char device region reserved for IIO devices. */
static dev_t iio_devt;

/* Number of minors reserved for IIO character devices. */
#define IIO_DEV_MAX 256
/* The bus all IIO devices hang off in sysfs (/sys/bus/iio). */
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

/* Root of the "iio" debugfs tree; NULL if creation failed. */
static struct dentry *iio_debugfs_dentry;
44
/* Direction prefix for channel attribute names. */
static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

/* Channel type component of sysfs attribute names, indexed by IIO_*. */
static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
};

/* Modifier component of sysfs attribute names, indexed by IIO_MOD_*. */
static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
};

/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_RAW] = "raw",
	[IIO_CHAN_INFO_PROCESSED] = "input",
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
	[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
	[IIO_CHAN_INFO_FREQUENCY] = "frequency",
	[IIO_CHAN_INFO_PHASE] = "phase",
	[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
};
96
97const struct iio_chan_spec
98*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
99{
100 int i;
101
102 for (i = 0; i < indio_dev->num_channels; i++)
103 if (indio_dev->channels[i].scan_index == si)
104 return &indio_dev->channels[i];
105 return NULL;
106}
107
/* This turns up an awful lot */
/* Generic show for attributes whose value is a fixed string. */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);
116
/* Module init: register the iio bus, reserve a char device region, and
 * create the debugfs root.  debugfs failure is deliberately non-fatal. */
static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		printk(KERN_ERR
		       "%s could not register bus type\n",
			__FILE__);
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
		       __FILE__);
		goto error_unregister_bus_type;
	}

	/* Returns NULL on failure; callers check iio_debugfs_dentry. */
	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}
146
/* Module exit: release the chrdev region (if init reserved one), then
 * the bus and debugfs root.
 * NOTE(review): not the strict reverse of iio_init() - presumably
 * harmless ordering, but worth confirming. */
static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}
154
155#if defined(CONFIG_DEBUG_FS)
/* debugfs read for direct_reg_access: read the register at the cached
 * address via the driver's debugfs_reg_access callback and present it
 * as a hex string. */
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
			      size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	char buf[20];
	unsigned val = 0;
	ssize_t len;
	int ret;

	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  indio_dev->cached_reg_addr,
						  0, &val);
	if (ret)
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);

	len = snprintf(buf, sizeof(buf), "0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
175
/* debugfs write for direct_reg_access.  One value ("<addr>") caches the
 * register address for subsequent reads; two values ("<addr> <val>")
 * additionally write the register through the driver callback. */
static ssize_t iio_debugfs_write_reg(struct file *file,
		     const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	unsigned reg, val;
	char buf[80];
	int ret;

	/* Bound the copy and NUL-terminate before parsing. */
	count = min_t(size_t, count, (sizeof(buf)-1));
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = sscanf(buf, "%i %i", &reg, &val);

	switch (ret) {
	case 1:
		indio_dev->cached_reg_addr = reg;
		break;
	case 2:
		indio_dev->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}
212
/* File operations for the per-device direct_reg_access debugfs file. */
static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};
218
/* Remove the device's debugfs directory and everything under it. */
static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
	debugfs_remove_recursive(indio_dev->debugfs_dentry);
}
223
/* Create the per-device debugfs directory and its direct_reg_access
 * file.  A no-op (success) when the driver provides no register access
 * callback or the debugfs root is absent. */
static int iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct dentry *d;

	if (indio_dev->info->debugfs_reg_access == NULL)
		return 0;

	if (!iio_debugfs_dentry)
		return 0;

	indio_dev->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);
	if (indio_dev->debugfs_dentry == NULL) {
		dev_warn(indio_dev->dev.parent,
			 "Failed to create debugfs directory\n");
		/* NOTE(review): -EFAULT is an odd errno for a debugfs
		 * creation failure; -ENOMEM (as below) seems intended. */
		return -EFAULT;
	}

	d = debugfs_create_file("direct_reg_access", 0644,
				indio_dev->debugfs_dentry,
				indio_dev, &iio_debugfs_reg_fops);
	if (!d) {
		iio_device_unregister_debugfs(indio_dev);
		return -ENOMEM;
	}

	return 0;
}
#else
/* Debugfs compiled out: registration is a successful no-op. */
static int iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	return 0;
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */
263
264static ssize_t iio_read_channel_ext_info(struct device *dev,
265 struct device_attribute *attr,
266 char *buf)
267{
268 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
269 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
270 const struct iio_chan_spec_ext_info *ext_info;
271
272 ext_info = &this_attr->c->ext_info[this_attr->address];
273
274 return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
275}
276
277static ssize_t iio_write_channel_ext_info(struct device *dev,
278 struct device_attribute *attr,
279 const char *buf,
280 size_t len)
281{
282 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
283 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
284 const struct iio_chan_spec_ext_info *ext_info;
285
286 ext_info = &this_attr->c->ext_info[this_attr->address];
287
288 return ext_info->write(indio_dev, ext_info->private,
289 this_attr->c, buf, len);
290}
291
/* sysfs show for channel value attributes: format the driver's
 * read_raw() result according to the IIO_VAL_* code it returns. */
static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	bool scale_db = false;
	int ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
					    &val, &val2, this_attr->address);

	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_VAL_INT:
		return sprintf(buf, "%d\n", val);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		/* fallthrough - dB shares the micro formatting below */
	case IIO_VAL_INT_PLUS_MICRO:
		/* val2 carries the fractional part; val2 < 0 flags a
		 * negative overall value. */
		if (val2 < 0)
			return sprintf(buf, "-%d.%06u%s\n", val, -val2,
				       scale_db ? " dB" : "");
		else
			return sprintf(buf, "%d.%06u%s\n", val, val2,
				       scale_db ? " dB" : "");
	case IIO_VAL_INT_PLUS_NANO:
		if (val2 < 0)
			return sprintf(buf, "-%d.%09u\n", val, -val2);
		else
			return sprintf(buf, "%d.%09u\n", val, val2);
	default:
		return 0;
	}
}
327
/*
 * iio_write_channel_info() - sysfs "store" for channel values
 *
 * Parses a decimal number of the form [-]integer[.fraction] from @buf
 * and hands it to the driver's write_raw() as an (integer, fract) pair.
 * The fractional resolution (micro vs nano) is chosen by the driver's
 * optional write_raw_get_fmt() callback; default is micro.
 */
static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	/* fract_mult is the weight of the next fractional digit */
	int ret, integer = 0, fract = 0, fract_mult = 100000;
	bool integer_part = true, negative = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;	/* first digit = 0.1 in micro units */
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;	/* first digit = 0.1 in nano units */
			break;
		default:
			return -EINVAL;
		}

	if (buf[0] == '-') {
		negative = true;
		buf++;
	}

	while (*buf) {
		if ('0' <= *buf && *buf <= '9') {
			if (integer_part)
				integer = integer*10 + *buf - '0';
			else {
				fract += fract_mult*(*buf - '0');
				/* Stop once full precision consumed. */
				if (fract_mult == 1)
					break;
				fract_mult /= 10;
			}
		} else if (*buf == '\n') {
			/* A trailing newline is allowed, nothing after it. */
			if (*(buf + 1) == '\0')
				break;
			else
				return -EINVAL;
		} else if (*buf == '.') {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		buf++;
	}
	/* Negate whichever part is significant; fract only if integer is 0
	 * (e.g. "-0.5" must yield integer 0, fract negative). */
	if (negative) {
		if (integer)
			integer = -integer;
		else
			fract = -fract;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}
396
/*
 * __iio_device_attr_init() - fill in a device_attribute for one channel attr
 * @dev_attr: attribute to initialise
 * @postfix: final component of the attribute name (e.g. "raw", "scale")
 * @chan: channel the attribute describes
 * @readfunc: show callback, may be NULL (attribute not readable)
 * @writefunc: store callback, may be NULL (attribute not writable)
 * @generic: true for an attribute shared across channels of this type
 *
 * Builds the sysfs name from direction, channel type, index/modifier,
 * extend_name and @postfix, and sets mode/show/store accordingly.
 * Returns 0 or a negative errno.  The name is kfree'd by
 * __iio_device_attr_deinit().
 */
static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   bool generic)
{
	int ret;
	char *name_format, *full_postfix;
	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && !generic) {
		/* channel2 holds the modifier for modified channels */
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan
								    ->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan
								    ->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/*
	 * name_format is itself a printf format string; the %d holes (if
	 * any) are filled with channel/channel2 in the kasprintf below.
	 */
	if (chan->differential) { /* Differential can not have modifier */
		if (generic)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
		else if (chan->indexed)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s%d-%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    iio_chan_type_name_spec[chan->type],
					    chan->channel2,
					    full_postfix);
		else {
			WARN_ON("Differential channels must be indexed\n");
			ret = -EINVAL;
			goto error_free_full_postfix;
		}
	} else { /* Single ended */
		if (generic)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
		else if (chan->indexed)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    full_postfix);
		else
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
	}
	if (name_format == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = kasprintf(GFP_KERNEL,
					name_format,
					chan->channel,
					chan->channel2);
	if (dev_attr->attr.name == NULL) {
		ret = -ENOMEM;
		goto error_free_name_format;
	}

	if (readfunc) {
		dev_attr->attr.mode |= S_IRUGO;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= S_IWUSR;
		dev_attr->store = writefunc;
	}
	/* Intermediate strings are no longer needed once the name exists. */
	kfree(name_format);
	kfree(full_postfix);

	return 0;

error_free_name_format:
	kfree(name_format);
error_free_full_postfix:
	kfree(full_postfix);
error_ret:
	return ret;
}
518
/* Release the attribute name allocated by __iio_device_attr_init(). */
static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}
523
/*
 * __iio_add_chan_devattr() - allocate, initialise and queue one channel attr
 * @postfix: attribute name postfix (e.g. "raw", "scale")
 * @chan: channel the attribute belongs to
 * @readfunc: show callback, may be NULL
 * @writefunc: store callback, may be NULL
 * @mask: stored as the attribute's address (driver-interpreted)
 * @generic: true for an attribute shared across channels
 * @dev: device, used only for error reporting
 * @attr_list: list the new attribute is appended to
 *
 * Returns 0 on success, -EBUSY when an attribute of the same name is
 * already on @attr_list (expected for shared/generic attrs, an error
 * otherwise), or another negative errno.
 */
int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   bool generic,
			   struct device *dev,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof *iio_attr, GFP_KERNEL);
	if (iio_attr == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, generic);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	/* Reject duplicates; only complain when the caller did not expect
	 * a possible collision (i.e. non-generic attributes). */
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (!generic)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
error_ret:
	return ret;
}
573
574static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
575 struct iio_chan_spec const *chan)
576{
577 int ret, attrcount = 0;
578 int i;
579 const struct iio_chan_spec_ext_info *ext_info;
580
581 if (chan->channel < 0)
582 return 0;
583 for_each_set_bit(i, &chan->info_mask, sizeof(long)*8) {
584 ret = __iio_add_chan_devattr(iio_chan_info_postfix[i/2],
585 chan,
586 &iio_read_channel_info,
587 &iio_write_channel_info,
588 i/2,
589 !(i%2),
590 &indio_dev->dev,
591 &indio_dev->channel_attr_list);
592 if (ret == -EBUSY && (i%2 == 0)) {
593 ret = 0;
594 continue;
595 }
596 if (ret < 0)
597 goto error_ret;
598 attrcount++;
599 }
600
601 if (chan->ext_info) {
602 unsigned int i = 0;
603 for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
604 ret = __iio_add_chan_devattr(ext_info->name,
605 chan,
606 ext_info->read ?
607 &iio_read_channel_ext_info : NULL,
608 ext_info->write ?
609 &iio_write_channel_ext_info : NULL,
610 i,
611 ext_info->shared,
612 &indio_dev->dev,
613 &indio_dev->channel_attr_list);
614 i++;
615 if (ret == -EBUSY && ext_info->shared)
616 continue;
617
618 if (ret)
619 goto error_ret;
620
621 attrcount++;
622 }
623 }
624
625 ret = attrcount;
626error_ret:
627 return ret;
628}
629
/* Free one dynamically created channel attribute (name then container).
 * @indio_dev is unused here but kept for symmetry with the add path. */
static void iio_device_remove_and_free_read_attr(struct iio_dev *indio_dev,
						 struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
636
637static ssize_t iio_show_dev_name(struct device *dev,
638 struct device_attribute *attr,
639 char *buf)
640{
641 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
642 return sprintf(buf, "%s\n", indio_dev->name);
643}
644
645static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
646
/*
 * iio_device_register_sysfs() - build the combined channel attribute group
 * @indio_dev: device to register attributes for
 *
 * Merges the driver's static attributes, the dynamically built
 * per-channel attributes and the "name" attribute into one
 * chan_attr_group and appends it to indio_dev->groups.
 * Returns 0 or a negative errno (attributes cleaned up on failure).
 */
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p, *n;
	struct attribute **attr;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if it is name is NULL.
	 */
	INIT_LIST_HEAD(&indio_dev->channel_attr_list);
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			/* Returns the number of attributes it created. */
			ret = iio_device_add_channel_sysfs(indio_dev,
							   &indio_dev
							   ->channels[i]);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (indio_dev->name)
		attrcount++;

	/* +1 for the NULL terminator the sysfs core expects. */
	indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
						   sizeof(indio_dev->chan_attr_group.attrs[0]),
						   GFP_KERNEL);
	if (indio_dev->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes */
	if (indio_dev->info->attrs)
		memcpy(indio_dev->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(indio_dev->chan_attr_group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &indio_dev->channel_attr_list, l)
		indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;

	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->chan_attr_group;

	return 0;

error_clear_attrs:
	/* _safe variant: entries are freed while walking the list. */
	list_for_each_entry_safe(p, n,
				 &indio_dev->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(indio_dev, p);
	}

	return ret;
}
712
/* Free all dynamically created channel attributes and the merged
 * attribute array built by iio_device_register_sysfs(). */
static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{

	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, &indio_dev->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(indio_dev, p);
	}
	kfree(indio_dev->chan_attr_group.attrs);
}
724
/* Device-model release callback: runs when the last reference to the
 * struct device is dropped; unwinds everything iio_device_register()
 * set up (chrdev, trigger consumer, events, sysfs, debugfs). */
static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);
	cdev_del(&indio_dev->chrdev);
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);
	iio_device_unregister_debugfs(indio_dev);
}
735
/* Device type shared by all IIO devices; supplies the release hook. */
static struct device_type iio_dev_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};
740
/*
 * iio_device_alloc() - allocate an iio_dev plus optional driver private data
 * @sizeof_priv: bytes of driver-private storage to append (0 for none)
 *
 * The private area is placed after the iio_dev, aligned to IIO_ALIGN.
 * Returns the initialised device (refcounted via device_initialize)
 * or NULL on failure.  Free with iio_device_free().
 */
struct iio_dev *iio_device_alloc(int sizeof_priv)
{
	struct iio_dev *dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct ? */
	alloc_size += IIO_ALIGN - 1;

	dev = kzalloc(alloc_size, GFP_KERNEL);

	if (dev) {
		dev->dev.groups = dev->groups;
		dev->dev.type = &iio_dev_type;
		dev->dev.bus = &iio_bus_type;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);
		mutex_init(&dev->info_exist_lock);

		dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
		if (dev->id < 0) {
			/* cannot use a dev_err as the name isn't available */
			printk(KERN_ERR "Failed to get id\n");
			kfree(dev);
			return NULL;
		}
		dev_set_name(&dev->dev, "iio:device%d", dev->id);
	}

	return dev;
}
EXPORT_SYMBOL(iio_device_alloc);
778
779void iio_device_free(struct iio_dev *dev)
780{
781 if (dev) {
782 ida_simple_remove(&iio_ida, dev->id);
783 kfree(dev);
784 }
785}
786EXPORT_SYMBOL(iio_device_free);
787
/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 *
 * Single-open device: the busy bit serialises access, so a second
 * concurrent open fails with -EBUSY.
 **/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev *indio_dev = container_of(inode->i_cdev,
						struct iio_dev, chrdev);

	/* test_and_set is atomic: only one opener can win the bit. */
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
		return -EBUSY;

	filp->private_data = indio_dev;

	return 0;
}
803
/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 *
 * Clears the busy bit taken in iio_chrdev_open() so the device can be
 * opened again.
 **/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev *indio_dev = container_of(inode->i_cdev,
						struct iio_dev, chrdev);
	clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
	return 0;
}
814
815/* Somewhat of a cross file organization violation - ioctls here are actually
816 * event related */
817static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
818{
819 struct iio_dev *indio_dev = filp->private_data;
820 int __user *ip = (int __user *)arg;
821 int fd;
822
823 if (cmd == IIO_GET_EVENT_FD_IOCTL) {
824 fd = iio_event_getfd(indio_dev);
825 if (copy_to_user(ip, &fd, sizeof(fd)))
826 return -EFAULT;
827 return 0;
828 }
829 return -EINVAL;
830}
831
/* File operations for the per-device chrdev (buffer reads plus the
 * event-fd ioctl).  The same handler serves compat ioctls since the
 * only argument is an int written back to userspace. */
static const struct file_operations iio_buffer_fileops = {
	.read = iio_buffer_read_first_n_outer_addr,
	.release = iio_chrdev_release,
	.open = iio_chrdev_open,
	.poll = iio_buffer_poll_addr,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = iio_ioctl,
};
842
843static const struct iio_buffer_setup_ops noop_ring_setup_ops;
844
/*
 * iio_device_register() - register a configured iio_dev with the core
 * @indio_dev: device previously obtained from iio_device_alloc()
 *
 * Sets up (in order) debugfs, sysfs, the event set, the optional
 * trigger consumer, the struct device and finally the chrdev; the goto
 * ladder unwinds in reverse order on failure.  Returns 0 or a negative
 * errno.
 */
int iio_device_register(struct iio_dev *indio_dev)
{
	int ret;

	/* configure elements for the chrdev */
	indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);

	ret = iio_device_register_debugfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register debugfs interfaces\n");
		goto error_ret;
	}
	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_unreg_debugfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_register_trigger_consumer(indio_dev);

	/* Buffered devices without driver-supplied setup_ops get no-ops. */
	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
		indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	ret = device_add(&indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;
	cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
	indio_dev->chrdev.owner = indio_dev->info->driver_module;
	ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
	if (ret < 0)
		goto error_del_device;
	return 0;

error_del_device:
	device_del(&indio_dev->dev);
error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);
899
/*
 * iio_device_unregister() - reverse the effects of iio_device_register()
 *
 * info is NULLed under info_exist_lock so in-flight file operations can
 * detect that the driver has gone away; the remaining teardown happens
 * in iio_dev_release() when the last device reference drops.
 */
void iio_device_unregister(struct iio_dev *indio_dev)
{
	mutex_lock(&indio_dev->info_exist_lock);
	indio_dev->info = NULL;
	mutex_unlock(&indio_dev->info_exist_lock);
	device_unregister(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
908subsys_initcall(iio_init);
909module_exit(iio_exit);
910
911MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
912MODULE_DESCRIPTION("Industrial I/O core");
913MODULE_LICENSE("GPL");
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
new file mode 100644
index 000000000000..b49059de5d02
--- /dev/null
+++ b/drivers/iio/industrialio-event.c
@@ -0,0 +1,453 @@
1/* Industrial I/O event handling
2 *
3 * Copyright (c) 2008 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * Based on elements of hwmon and input subsystems.
10 */
11
12#include <linux/anon_inodes.h>
13#include <linux/device.h>
14#include <linux/fs.h>
15#include <linux/kernel.h>
16#include <linux/kfifo.h>
17#include <linux/module.h>
18#include <linux/poll.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/uaccess.h>
22#include <linux/wait.h>
23#include <linux/iio/iio.h>
24#include "iio_core.h"
25#include <linux/iio/sysfs.h>
26#include <linux/iio/events.h>
27
/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		fifo of detected events (fixed capacity of 16)
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including busy flag
 * @group:		event interface sysfs attribute group
 *
 * The wait queue's internal spinlock also guards @det_events and @flags.
 */
struct iio_event_interface {
	wait_queue_head_t wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head dev_attr_list;
	unsigned long flags;
	struct attribute_group group;
};
44
/*
 * iio_push_event() - queue one event for delivery to userspace
 * @indio_dev: device generating the event
 * @ev_code: packed event code identifying channel/type/direction
 * @timestamp: time of the event
 *
 * The event is dropped silently unless the event chrdev is currently
 * open (busy bit set) - nobody is listening otherwise.  Callable from
 * any context that may take the wait-queue spinlock.
 */
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_data ev;
	int copied;

	/* Does anyone care? */
	spin_lock(&ev_int->wait.lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {

		ev.id = ev_code;
		ev.timestamp = timestamp;

		/* kfifo_put returns the number of elements stored;
		 * 0 means the fifo was full and the event is lost. */
		copied = kfifo_put(&ev_int->det_events, &ev);
		if (copied != 0)
			wake_up_locked_poll(&ev_int->wait, POLLIN);
	}
	spin_unlock(&ev_int->wait.lock);

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
67
/**
 * iio_event_poll() - poll the event queue to find out if it has data
 *
 * Reports POLLIN | POLLRDNORM when at least one event is queued.
 */
static unsigned int iio_event_poll(struct file *filep,
			     struct poll_table_struct *wait)
{
	struct iio_event_interface *ev_int = filep->private_data;
	unsigned int events = 0;

	poll_wait(filep, &ev_int->wait, wait);

	/* The wait-queue lock also protects the fifo. */
	spin_lock(&ev_int->wait.lock);
	if (!kfifo_is_empty(&ev_int->det_events))
		events = POLLIN | POLLRDNORM;
	spin_unlock(&ev_int->wait.lock);

	return events;
}
86
/*
 * iio_event_chrdev_read() - read queued events into a user buffer
 *
 * Blocks (unless O_NONBLOCK) until at least one event is available,
 * then copies as many whole iio_event_data records as fit in @count.
 * Returns bytes copied or a negative errno.
 */
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	unsigned int copied;
	int ret;

	/* Partial records are never returned. */
	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	spin_lock(&ev_int->wait.lock);
	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_unlock;
		}
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible_locked(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events));
		if (ret)
			goto error_unlock;
		/* Single access device so no one else can get the data */
	}

	/* NOTE(review): kfifo_to_user copies to userspace while the
	 * spinlock is held - presumably relies on the fifo's own
	 * semantics here; confirm against kfifo documentation. */
	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);

error_unlock:
	spin_unlock(&ev_int->wait.lock);

	return ret ? ret : copied;
}
120
/* Release the event chrdev: clear the busy bit and drain the fifo so a
 * subsequent open starts from a clean state. */
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_event_interface *ev_int = filep->private_data;

	spin_lock(&ev_int->wait.lock);
	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	kfifo_reset_out(&ev_int->det_events);
	spin_unlock(&ev_int->wait.lock);

	return 0;
}
137
/* File operations for the anonymous event fd handed out by
 * iio_event_getfd(); read-only, no open (anon inode). */
static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.poll = iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
145
/*
 * iio_event_getfd() - create the anonymous fd for the device's event line
 * @indio_dev: device whose events to expose
 *
 * Single-consumer: the busy bit guarantees only one event fd exists at
 * a time.  Returns the new fd, -ENODEV when the device has no event
 * interface, -EBUSY when an fd is already open, or the anon-inode
 * allocation error.
 */
int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	spin_lock(&ev_int->wait.lock);
	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		spin_unlock(&ev_int->wait.lock);
		return -EBUSY;
	}
	spin_unlock(&ev_int->wait.lock);
	fd = anon_inode_getfd("iio:event",
				&iio_event_chrdev_fileops, ev_int, O_RDONLY);
	if (fd < 0) {
		/* Creation failed: release the claim taken above. */
		spin_lock(&ev_int->wait.lock);
		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		spin_unlock(&ev_int->wait.lock);
	}
	return fd;
}
169
/* Sysfs name fragment for each event type, indexed by IIO_EV_TYPE_*. */
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};
177
/* Sysfs name fragment for each event direction, indexed by IIO_EV_DIR_*. */
static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};
183
184static ssize_t iio_ev_state_store(struct device *dev,
185 struct device_attribute *attr,
186 const char *buf,
187 size_t len)
188{
189 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
190 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
191 int ret;
192 bool val;
193
194 ret = strtobool(buf, &val);
195 if (ret < 0)
196 return ret;
197
198 ret = indio_dev->info->write_event_config(indio_dev,
199 this_attr->address,
200 val);
201 return (ret < 0) ? ret : len;
202}
203
204static ssize_t iio_ev_state_show(struct device *dev,
205 struct device_attribute *attr,
206 char *buf)
207{
208 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
209 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
210 int val = indio_dev->info->read_event_config(indio_dev,
211 this_attr->address);
212
213 if (val < 0)
214 return val;
215 else
216 return sprintf(buf, "%d\n", val);
217}
218
/* Sysfs "show" for an event's threshold/value via read_event_value(). */
static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, ret;

	ret = indio_dev->info->read_event_value(indio_dev,
						this_attr->address, &val);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", val);
}
234
235static ssize_t iio_ev_value_store(struct device *dev,
236 struct device_attribute *attr,
237 const char *buf,
238 size_t len)
239{
240 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
241 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
242 unsigned long val;
243 int ret;
244
245 if (!indio_dev->info->write_event_value)
246 return -EINVAL;
247
248 ret = strict_strtoul(buf, 10, &val);
249 if (ret)
250 return ret;
251
252 ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
253 val);
254 if (ret < 0)
255 return ret;
256
257 return len;
258}
259
/*
 * iio_device_add_event_sysfs() - create "<type>_<dir>_en" and
 * "<type>_<dir>_value" attributes for every bit set in chan->event_mask
 * @indio_dev: device the channel belongs to
 * @chan: channel specification
 *
 * Each event-mask bit index encodes type (i / IIO_EV_DIR_MAX) and
 * direction (i % IIO_EV_DIR_MAX).  Returns the number of attributes
 * created or a negative errno.
 */
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;
	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		/* Pack an event code matching what iio_push_event() will
		 * emit; the packing macro depends on the channel kind. */
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		/* __iio_device_attr_init kstrdup'd the postfix. */
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}
332
/* Free every dynamically built event attribute.  The list itself is
 * discarded afterwards, so entries are not unlinked individually. */
static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	list_for_each_entry_safe(p, n,
				 &indio_dev->event_interface->
				 dev_attr_list, l) {
		kfree(p->dev_attr.attr.name);
		kfree(p);
	}
}
343
/* Build event sysfs attributes for every channel; returns the total
 * number of attributes created, or a negative errno (all attributes
 * created so far are freed on failure). */
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
	/* Dynically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			goto error_clear_attrs;
		attrcount += ret;
	}
	return attrcount;

error_clear_attrs:
	__iio_remove_event_config_attrs(indio_dev);

	return ret;
}
364
365static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
366{
367 int j;
368
369 for (j = 0; j < indio_dev->num_channels; j++)
370 if (indio_dev->channels[j].event_mask != 0)
371 return true;
372 return false;
373}
374
/* Initialise the event fifo and its wait queue for a fresh interface. */
static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
}
380
/* Name of the sysfs subdirectory holding all event attributes. */
static const char *iio_event_group_name = "events";
/*
 * iio_device_register_eventset() - build the "events" attribute group
 * @indio_dev: device to register events for
 *
 * Allocates the event interface, merges driver-supplied static event
 * attributes with the dynamically generated per-channel ones, and adds
 * the resulting group to indio_dev->groups.  A device with neither
 * static event attrs nor channel event masks gets no interface at all
 * (returns 0 without allocating).  Returns 0 or a negative errno.
 */
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_setup_ev_int(indio_dev->event_interface);
	/* Count the driver's static event attributes. */
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	indio_dev->event_interface->group.name = iio_event_group_name;
	/* +1 for the NULL terminator the sysfs core expects. */
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
							  sizeof(indio_dev->event_interface->group.attrs[0]),
							  GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}
445
/* Free everything iio_device_register_eventset() allocated; a no-op
 * for devices that never got an event interface. */
void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface->group.attrs);
	kfree(indio_dev->event_interface);
}
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
new file mode 100644
index 000000000000..0f582df75a19
--- /dev/null
+++ b/drivers/iio/industrialio-trigger.c
@@ -0,0 +1,509 @@
1/* The industrial I/O core, trigger handling functions
2 *
3 * Copyright (c) 2008 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/idr.h>
12#include <linux/err.h>
13#include <linux/device.h>
14#include <linux/interrupt.h>
15#include <linux/list.h>
16#include <linux/slab.h>
17
18#include <linux/iio/iio.h>
19#include <linux/iio/trigger.h>
20#include "iio_core.h"
21#include "iio_core_trigger.h"
22#include <linux/iio/trigger_consumer.h>
23
24/* RFC - Question of approach
25 * Make the common case (single sensor single trigger)
26 * simple by starting trigger capture from when first sensors
27 * is added.
28 *
29 * Complex simultaneous start requires use of 'hold' functionality
30 * of the trigger. (not implemented)
31 *
32 * Any other suggestions?
33 */
34
/* Provides the unique id used in the trigger%d sysfs name */
static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);
40
41/**
42 * iio_trigger_read_name() - retrieve useful identifying name
43 **/
44static ssize_t iio_trigger_read_name(struct device *dev,
45 struct device_attribute *attr,
46 char *buf)
47{
48 struct iio_trigger *trig = dev_get_drvdata(dev);
49 return sprintf(buf, "%s\n", trig->name);
50}
51
52static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
53
54/**
55 * iio_trigger_register_sysfs() - create a device for this trigger
56 * @trig_info: the trigger
57 *
58 * Also adds any control attribute registered by the trigger driver
59 **/
60static int iio_trigger_register_sysfs(struct iio_trigger *trig_info)
61{
62 return sysfs_add_file_to_group(&trig_info->dev.kobj,
63 &dev_attr_name.attr,
64 NULL);
65}
66
67static void iio_trigger_unregister_sysfs(struct iio_trigger *trig_info)
68{
69 sysfs_remove_file_from_group(&trig_info->dev.kobj,
70 &dev_attr_name.attr,
71 NULL);
72}
73
74int iio_trigger_register(struct iio_trigger *trig_info)
75{
76 int ret;
77
78 trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
79 if (trig_info->id < 0) {
80 ret = trig_info->id;
81 goto error_ret;
82 }
83 /* Set the name used for the sysfs directory etc */
84 dev_set_name(&trig_info->dev, "trigger%ld",
85 (unsigned long) trig_info->id);
86
87 ret = device_add(&trig_info->dev);
88 if (ret)
89 goto error_unregister_id;
90
91 ret = iio_trigger_register_sysfs(trig_info);
92 if (ret)
93 goto error_device_del;
94
95 /* Add to list of available triggers held by the IIO core */
96 mutex_lock(&iio_trigger_list_lock);
97 list_add_tail(&trig_info->list, &iio_trigger_list);
98 mutex_unlock(&iio_trigger_list_lock);
99
100 return 0;
101
102error_device_del:
103 device_del(&trig_info->dev);
104error_unregister_id:
105 ida_simple_remove(&iio_trigger_ida, trig_info->id);
106error_ret:
107 return ret;
108}
109EXPORT_SYMBOL(iio_trigger_register);
110
/**
 * iio_trigger_unregister() - remove a previously registered trigger
 * @trig_info: the trigger to remove
 *
 * Reverses iio_trigger_register(): takes the trigger off the global
 * list, removes its sysfs attributes, releases its id, then
 * unregisters the underlying device.
 */
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	iio_trigger_unregister_sysfs(trig_info);
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	/*
	 * NOTE(review): original comment said "Possible issue in here" -
	 * presumably about consumers still holding references when the
	 * device is unregistered; lifetime handling needs confirming.
	 */
	device_unregister(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);
123
/*
 * Look up a registered trigger by name under the global list lock.
 * Returns the matching trigger or NULL. No reference is taken on the
 * returned trigger here; the caller handles that.
 *
 * NOTE(review): @len is currently unused - matching relies on
 * sysfs_streq() tolerating the trailing newline of a sysfs write.
 */
static struct iio_trigger *iio_trigger_find_by_name(const char *name,
						    size_t len)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}
139
/**
 * iio_trigger_poll() - deliver a trigger event to all enabled consumers
 * @trig: trigger which occurred
 * @time: event timestamp (unused in this function)
 *
 * Fires one sub-irq per enabled consumer via generic_handle_irq(),
 * incrementing use_count once per consumer fired; each consumer
 * reports completion through iio_trigger_notify_done().
 *
 * NOTE(review): use_count is read and modified without locking -
 * presumably safe because polls of a given trigger do not run
 * concurrently; confirm before relying on it.
 */
void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
	int i;
	if (!trig->use_count)
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
			if (trig->subirqs[i].enabled) {
				trig->use_count++;
				generic_handle_irq(trig->subirq_base + i);
			}
}
EXPORT_SYMBOL(iio_trigger_poll);
151
152irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
153{
154 iio_trigger_poll(private, iio_get_time_ns());
155 return IRQ_HANDLED;
156}
157EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
158
/**
 * iio_trigger_poll_chained() - deliver a trigger event from thread context
 * @trig: trigger which occurred
 * @time: event timestamp (unused in this function)
 *
 * Same fan-out as iio_trigger_poll() but uses handle_nested_irq(),
 * for triggers whose parent interrupt is itself threaded/nested.
 */
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
	int i;
	if (!trig->use_count)
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
			if (trig->subirqs[i].enabled) {
				trig->use_count++;
				handle_nested_irq(trig->subirq_base + i);
			}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
170
/**
 * iio_trigger_notify_done() - a consumer is finished with this trigger event
 * @trig: the trigger concerned
 *
 * When the last outstanding consumer completes, give the trigger driver
 * a chance to re-enable itself; if try_reenable() reports a missed
 * event, poll again immediately.
 */
void iio_trigger_notify_done(struct iio_trigger *trig)
{
	trig->use_count--;
	if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
		if (trig->ops->try_reenable(trig))
			/* Missed an interrupt so launch new poll now */
			iio_trigger_poll(trig, 0);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
180
181/* Trigger Consumer related functions */
182static int iio_trigger_get_irq(struct iio_trigger *trig)
183{
184 int ret;
185 mutex_lock(&trig->pool_lock);
186 ret = bitmap_find_free_region(trig->pool,
187 CONFIG_IIO_CONSUMERS_PER_TRIGGER,
188 ilog2(1));
189 mutex_unlock(&trig->pool_lock);
190 if (ret >= 0)
191 ret += trig->subirq_base;
192
193 return ret;
194}
195
196static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
197{
198 mutex_lock(&trig->pool_lock);
199 clear_bit(irq - trig->subirq_base, trig->pool);
200 mutex_unlock(&trig->pool_lock);
201}
202
203/* Complexity in here. With certain triggers (datardy) an acknowledgement
204 * may be needed if the pollfuncs do not include the data read for the
205 * triggering device.
206 * This is not currently handled. Alternative of not enabling trigger unless
207 * the relevant function is in there may be the best option.
208 */
209/* Worth protecting against double additions?*/
210static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
211 struct iio_poll_func *pf)
212{
213 int ret = 0;
214 bool notinuse
215 = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
216
217 /* Prevent the module being removed whilst attached to a trigger */
218 __module_get(pf->indio_dev->info->driver_module);
219 pf->irq = iio_trigger_get_irq(trig);
220 ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
221 pf->type, pf->name,
222 pf);
223 if (ret < 0) {
224 module_put(pf->indio_dev->info->driver_module);
225 return ret;
226 }
227
228 if (trig->ops && trig->ops->set_trigger_state && notinuse) {
229 ret = trig->ops->set_trigger_state(trig, true);
230 if (ret < 0)
231 module_put(pf->indio_dev->info->driver_module);
232 }
233
234 return ret;
235}
236
237static int iio_trigger_dettach_poll_func(struct iio_trigger *trig,
238 struct iio_poll_func *pf)
239{
240 int ret = 0;
241 bool no_other_users
242 = (bitmap_weight(trig->pool,
243 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
244 == 1);
245 if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
246 ret = trig->ops->set_trigger_state(trig, false);
247 if (ret)
248 goto error_ret;
249 }
250 iio_trigger_put_irq(trig, pf->irq);
251 free_irq(pf->irq, pf);
252 module_put(pf->indio_dev->info->driver_module);
253
254error_ret:
255 return ret;
256}
257
258irqreturn_t iio_pollfunc_store_time(int irq, void *p)
259{
260 struct iio_poll_func *pf = p;
261 pf->timestamp = iio_get_time_ns();
262 return IRQ_WAKE_THREAD;
263}
264EXPORT_SYMBOL(iio_pollfunc_store_time);
265
266struct iio_poll_func
267*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
268 irqreturn_t (*thread)(int irq, void *p),
269 int type,
270 struct iio_dev *indio_dev,
271 const char *fmt,
272 ...)
273{
274 va_list vargs;
275 struct iio_poll_func *pf;
276
277 pf = kmalloc(sizeof *pf, GFP_KERNEL);
278 if (pf == NULL)
279 return NULL;
280 va_start(vargs, fmt);
281 pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
282 va_end(vargs);
283 if (pf->name == NULL) {
284 kfree(pf);
285 return NULL;
286 }
287 pf->h = h;
288 pf->thread = thread;
289 pf->type = type;
290 pf->indio_dev = indio_dev;
291
292 return pf;
293}
294EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
295
/**
 * iio_dealloc_pollfunc() - free a poll function from iio_alloc_pollfunc()
 * @pf: poll function to free
 */
void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	/* Name was allocated by kvasprintf() in iio_alloc_pollfunc() */
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
302
303/**
304 * iio_trigger_read_current() - trigger consumer sysfs query which trigger
305 *
306 * For trigger consumers the current_trigger interface allows the trigger
307 * used by the device to be queried.
308 **/
309static ssize_t iio_trigger_read_current(struct device *dev,
310 struct device_attribute *attr,
311 char *buf)
312{
313 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
314
315 if (indio_dev->trig)
316 return sprintf(buf, "%s\n", indio_dev->trig->name);
317 return 0;
318}
319
320/**
321 * iio_trigger_write_current() trigger consumer sysfs set current trigger
322 *
323 * For trigger consumers the current_trigger interface allows the trigger
324 * used for this device to be specified at run time based on the triggers
325 * name.
326 **/
327static ssize_t iio_trigger_write_current(struct device *dev,
328 struct device_attribute *attr,
329 const char *buf,
330 size_t len)
331{
332 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
333 struct iio_trigger *oldtrig = indio_dev->trig;
334 struct iio_trigger *trig;
335 int ret;
336
337 mutex_lock(&indio_dev->mlock);
338 if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
339 mutex_unlock(&indio_dev->mlock);
340 return -EBUSY;
341 }
342 mutex_unlock(&indio_dev->mlock);
343
344 trig = iio_trigger_find_by_name(buf, len);
345 if (oldtrig == trig)
346 return len;
347
348 if (trig && indio_dev->info->validate_trigger) {
349 ret = indio_dev->info->validate_trigger(indio_dev, trig);
350 if (ret)
351 return ret;
352 }
353
354 if (trig && trig->ops && trig->ops->validate_device) {
355 ret = trig->ops->validate_device(trig, indio_dev);
356 if (ret)
357 return ret;
358 }
359
360 indio_dev->trig = trig;
361
362 if (oldtrig && indio_dev->trig != oldtrig)
363 iio_trigger_put(oldtrig);
364 if (indio_dev->trig)
365 iio_trigger_get(indio_dev->trig);
366
367 return len;
368}
369
/* Root-writable: changing the trigger alters device behaviour */
static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
		   iio_trigger_read_current,
		   iio_trigger_write_current);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

/* Shows up as the "trigger" subdirectory of a consumer device */
static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};
383
/*
 * Device release callback: runs when the last reference to the
 * trigger's embedded struct device is dropped. Undoes the sub-irq
 * setup performed in iio_trigger_alloc() and frees the trigger.
 */
static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			/* Mark the descriptor unusable before freeing it */
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i,
				     NULL);
			irq_set_handler(trig->subirq_base + i,
					NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}
406
/* Device type hooking iio_trig_release() into the driver core */
static struct device_type iio_trig_type = {
	.release = iio_trig_release,
};
410
411static void iio_trig_subirqmask(struct irq_data *d)
412{
413 struct irq_chip *chip = irq_data_get_irq_chip(d);
414 struct iio_trigger *trig
415 = container_of(chip,
416 struct iio_trigger, subirq_chip);
417 trig->subirqs[d->irq - trig->subirq_base].enabled = false;
418}
419
420static void iio_trig_subirqunmask(struct irq_data *d)
421{
422 struct irq_chip *chip = irq_data_get_irq_chip(d);
423 struct iio_trigger *trig
424 = container_of(chip,
425 struct iio_trigger, subirq_chip);
426 trig->subirqs[d->irq - trig->subirq_base].enabled = true;
427}
428
/**
 * iio_trigger_alloc() - allocate and initialise a trigger
 * @fmt: printf-style format (plus varargs) for the trigger name
 *
 * Allocates the trigger, a block of consumer sub-irq descriptors and
 * the irq_chip used to mask/unmask them. Returns NULL on any failure.
 * The returned trigger holds an extra device reference; release with
 * iio_trigger_free().
 */
struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
	va_list vargs;
	struct iio_trigger *trig;
	trig = kzalloc(sizeof *trig, GFP_KERNEL);
	if (trig) {
		int i;
		trig->dev.type = &iio_trig_type;
		trig->dev.bus = &iio_bus_type;
		device_initialize(&trig->dev);
		dev_set_drvdata(&trig->dev, (void *)trig);

		mutex_init(&trig->pool_lock);
		/* One sub-irq per potential consumer of this trigger */
		trig->subirq_base
			= irq_alloc_descs(-1, 0,
					  CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					  0);
		if (trig->subirq_base < 0) {
			kfree(trig);
			return NULL;
		}
		va_start(vargs, fmt);
		trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
		va_end(vargs);
		if (trig->name == NULL) {
			irq_free_descs(trig->subirq_base,
				       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
			kfree(trig);
			return NULL;
		}
		trig->subirq_chip.name = trig->name;
		trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
		trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_set_chip(trig->subirq_base + i,
				     &trig->subirq_chip);
			irq_set_handler(trig->subirq_base + i,
					&handle_simple_irq);
			/* Disabled and unrequestable until a consumer attaches */
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOREQUEST | IRQ_NOAUTOEN,
					  IRQ_NOPROBE);
		}
		get_device(&trig->dev);
	}
	return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);
476
/**
 * iio_trigger_free() - drop the reference taken in iio_trigger_alloc()
 * @trig: trigger to free (may be NULL)
 *
 * Actual freeing happens in iio_trig_release() on the last put.
 */
void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);
483
/* Add the "trigger" sysfs group to a device that consumes triggers */
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	indio_dev->groups[indio_dev->groupcounter++] =
		&iio_trigger_consumer_attr_group;
}
489
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up any associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}
496
/* Buffer postenable hook: attach the device's pollfunc to its trigger */
int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	return iio_trigger_attach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);
503
/* Buffer predisable hook: detach the device's pollfunc from its trigger */
int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	return iio_trigger_dettach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
new file mode 100644
index 000000000000..922645893dc8
--- /dev/null
+++ b/drivers/iio/inkern.c
@@ -0,0 +1,293 @@
1/* The industrial I/O core in kernel channel mapping
2 *
3 * Copyright (c) 2011 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9#include <linux/err.h>
10#include <linux/export.h>
11#include <linux/slab.h>
12#include <linux/mutex.h>
13
14#include <linux/iio/iio.h>
15#include "iio_core.h"
16#include <linux/iio/machine.h>
17#include <linux/iio/driver.h>
18#include <linux/iio/consumer.h>
19
/* Ties one consumer-facing map entry to the IIO device providing it */
struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

/* Global registry of channel maps, guarded by iio_map_list_lock */
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
28
29int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
30{
31 int i = 0, ret = 0;
32 struct iio_map_internal *mapi;
33
34 if (maps == NULL)
35 return 0;
36
37 mutex_lock(&iio_map_list_lock);
38 while (maps[i].consumer_dev_name != NULL) {
39 mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
40 if (mapi == NULL) {
41 ret = -ENOMEM;
42 goto error_ret;
43 }
44 mapi->map = &maps[i];
45 mapi->indio_dev = indio_dev;
46 list_add(&mapi->l, &iio_map_list);
47 i++;
48 }
49error_ret:
50 mutex_unlock(&iio_map_list_lock);
51
52 return ret;
53}
54EXPORT_SYMBOL_GPL(iio_map_array_register);
55
56
57/* Assumes the exact same array (e.g. memory locations)
58 * used at unregistration as used at registration rather than
59 * more complex checking of contents.
60 */
61int iio_map_array_unregister(struct iio_dev *indio_dev,
62 struct iio_map *maps)
63{
64 int i = 0, ret = 0;
65 bool found_it;
66 struct iio_map_internal *mapi;
67
68 if (maps == NULL)
69 return 0;
70
71 mutex_lock(&iio_map_list_lock);
72 while (maps[i].consumer_dev_name != NULL) {
73 found_it = false;
74 list_for_each_entry(mapi, &iio_map_list, l)
75 if (&maps[i] == mapi->map) {
76 list_del(&mapi->l);
77 kfree(mapi);
78 found_it = true;
79 break;
80 }
81 if (found_it == false) {
82 ret = -ENODEV;
83 goto error_ret;
84 }
85 i++;
86 }
87error_ret:
88 mutex_unlock(&iio_map_list_lock);
89
90 return ret;
91}
92EXPORT_SYMBOL_GPL(iio_map_array_unregister);
93
94static const struct iio_chan_spec
95*iio_chan_spec_from_name(const struct iio_dev *indio_dev,
96 const char *name)
97{
98 int i;
99 const struct iio_chan_spec *chan = NULL;
100
101 for (i = 0; i < indio_dev->num_channels; i++)
102 if (indio_dev->channels[i].datasheet_name &&
103 strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
104 chan = &indio_dev->channels[i];
105 break;
106 }
107 return chan;
108}
109
110
111struct iio_channel *iio_st_channel_get(const char *name,
112 const char *channel_name)
113{
114 struct iio_map_internal *c_i = NULL, *c = NULL;
115 struct iio_channel *channel;
116
117 if (name == NULL && channel_name == NULL)
118 return ERR_PTR(-ENODEV);
119
120 /* first find matching entry the channel map */
121 mutex_lock(&iio_map_list_lock);
122 list_for_each_entry(c_i, &iio_map_list, l) {
123 if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
124 (channel_name &&
125 strcmp(channel_name, c_i->map->consumer_channel) != 0))
126 continue;
127 c = c_i;
128 get_device(&c->indio_dev->dev);
129 break;
130 }
131 mutex_unlock(&iio_map_list_lock);
132 if (c == NULL)
133 return ERR_PTR(-ENODEV);
134
135 channel = kmalloc(sizeof(*channel), GFP_KERNEL);
136 if (channel == NULL)
137 return ERR_PTR(-ENOMEM);
138
139 channel->indio_dev = c->indio_dev;
140
141 if (c->map->adc_channel_label)
142 channel->channel =
143 iio_chan_spec_from_name(channel->indio_dev,
144 c->map->adc_channel_label);
145
146 return channel;
147}
148EXPORT_SYMBOL_GPL(iio_st_channel_get);
149
/* Release a channel from iio_st_channel_get(): drop the device
 * reference taken there and free the channel struct.
 */
void iio_st_channel_release(struct iio_channel *channel)
{
	put_device(&channel->indio_dev->dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_st_channel_release);
156
157struct iio_channel *iio_st_channel_get_all(const char *name)
158{
159 struct iio_channel *chans;
160 struct iio_map_internal *c = NULL;
161 int nummaps = 0;
162 int mapind = 0;
163 int i, ret;
164
165 if (name == NULL)
166 return ERR_PTR(-EINVAL);
167
168 mutex_lock(&iio_map_list_lock);
169 /* first count the matching maps */
170 list_for_each_entry(c, &iio_map_list, l)
171 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
172 continue;
173 else
174 nummaps++;
175
176 if (nummaps == 0) {
177 ret = -ENODEV;
178 goto error_ret;
179 }
180
181 /* NULL terminated array to save passing size */
182 chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
183 if (chans == NULL) {
184 ret = -ENOMEM;
185 goto error_ret;
186 }
187
188 /* for each map fill in the chans element */
189 list_for_each_entry(c, &iio_map_list, l) {
190 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
191 continue;
192 chans[mapind].indio_dev = c->indio_dev;
193 chans[mapind].channel =
194 iio_chan_spec_from_name(chans[mapind].indio_dev,
195 c->map->adc_channel_label);
196 if (chans[mapind].channel == NULL) {
197 ret = -EINVAL;
198 put_device(&chans[mapind].indio_dev->dev);
199 goto error_free_chans;
200 }
201 get_device(&chans[mapind].indio_dev->dev);
202 mapind++;
203 }
204 mutex_unlock(&iio_map_list_lock);
205 if (mapind == 0) {
206 ret = -ENODEV;
207 goto error_free_chans;
208 }
209 return chans;
210
211error_free_chans:
212 for (i = 0; i < nummaps; i++)
213 if (chans[i].indio_dev)
214 put_device(&chans[i].indio_dev->dev);
215 kfree(chans);
216error_ret:
217 mutex_unlock(&iio_map_list_lock);
218
219 return ERR_PTR(ret);
220}
221EXPORT_SYMBOL_GPL(iio_st_channel_get_all);
222
223void iio_st_channel_release_all(struct iio_channel *channels)
224{
225 struct iio_channel *chan = &channels[0];
226
227 while (chan->indio_dev) {
228 put_device(&chan->indio_dev->dev);
229 chan++;
230 }
231 kfree(channels);
232}
233EXPORT_SYMBOL_GPL(iio_st_channel_release_all);
234
235int iio_st_read_channel_raw(struct iio_channel *chan, int *val)
236{
237 int val2, ret;
238
239 mutex_lock(&chan->indio_dev->info_exist_lock);
240 if (chan->indio_dev->info == NULL) {
241 ret = -ENODEV;
242 goto err_unlock;
243 }
244
245 ret = chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
246 val, &val2, 0);
247err_unlock:
248 mutex_unlock(&chan->indio_dev->info_exist_lock);
249
250 return ret;
251}
252EXPORT_SYMBOL_GPL(iio_st_read_channel_raw);
253
254int iio_st_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
255{
256 int ret;
257
258 mutex_lock(&chan->indio_dev->info_exist_lock);
259 if (chan->indio_dev->info == NULL) {
260 ret = -ENODEV;
261 goto err_unlock;
262 }
263
264 ret = chan->indio_dev->info->read_raw(chan->indio_dev,
265 chan->channel,
266 val, val2,
267 IIO_CHAN_INFO_SCALE);
268err_unlock:
269 mutex_unlock(&chan->indio_dev->info_exist_lock);
270
271 return ret;
272}
273EXPORT_SYMBOL_GPL(iio_st_read_channel_scale);
274
275int iio_st_get_channel_type(struct iio_channel *chan,
276 enum iio_chan_type *type)
277{
278 int ret = 0;
279 /* Need to verify underlying driver has not gone away */
280
281 mutex_lock(&chan->indio_dev->info_exist_lock);
282 if (chan->indio_dev->info == NULL) {
283 ret = -ENODEV;
284 goto err_unlock;
285 }
286
287 *type = chan->channel->type;
288err_unlock:
289 mutex_unlock(&chan->indio_dev->info_exist_lock);
290
291 return ret;
292}
293EXPORT_SYMBOL_GPL(iio_st_get_channel_type);
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
new file mode 100644
index 000000000000..6bf9d05f4841
--- /dev/null
+++ b/drivers/iio/kfifo_buf.c
@@ -0,0 +1,150 @@
1#include <linux/slab.h>
2#include <linux/kernel.h>
3#include <linux/module.h>
4#include <linux/device.h>
5#include <linux/workqueue.h>
6#include <linux/kfifo.h>
7#include <linux/mutex.h>
8#include <linux/iio/kfifo_buf.h>
9
/* kfifo-backed implementation of an IIO buffer */
struct iio_kfifo {
	struct iio_buffer buffer;	/* embedded generic buffer */
	struct kfifo kf;		/* backing fifo storage */
	int update_needed;		/* geometry changed; realloc on next update */
};

#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
17
18static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
19 int bytes_per_datum, int length)
20{
21 if ((length == 0) || (bytes_per_datum == 0))
22 return -EINVAL;
23
24 __iio_update_buffer(&buf->buffer, bytes_per_datum, length);
25 return kfifo_alloc(&buf->kf, bytes_per_datum*length, GFP_KERNEL);
26}
27
28static int iio_request_update_kfifo(struct iio_buffer *r)
29{
30 int ret = 0;
31 struct iio_kfifo *buf = iio_to_kfifo(r);
32
33 if (!buf->update_needed)
34 goto error_ret;
35 kfifo_free(&buf->kf);
36 ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
37 buf->buffer.length);
38error_ret:
39 return ret;
40}
41
/* get_length hook: number of datums the buffer holds */
static int iio_get_length_kfifo(struct iio_buffer *r)
{
	return r->length;
}
46
/* Standard buffer attributes provided by the IIO core macros */
static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;

static struct attribute *iio_kfifo_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	NULL,
};

/* Appears as the "buffer" subdirectory of the device */
static struct attribute_group iio_kfifo_attribute_group = {
	.attrs = iio_kfifo_attributes,
	.name = "buffer",
};
60
/* get_bytes_per_datum hook: size of one scan in bytes */
static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
{
	return r->bytes_per_datum;
}
65
/* Flag that the fifo must be reallocated on the next request_update */
static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
{
	struct iio_kfifo *kf = iio_to_kfifo(r);
	kf->update_needed = true;
	return 0;
}
72
73static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
74{
75 if (r->bytes_per_datum != bpd) {
76 r->bytes_per_datum = bpd;
77 iio_mark_update_needed_kfifo(r);
78 }
79 return 0;
80}
81
82static int iio_set_length_kfifo(struct iio_buffer *r, int length)
83{
84 if (r->length != length) {
85 r->length = length;
86 iio_mark_update_needed_kfifo(r);
87 }
88 return 0;
89}
90
91static int iio_store_to_kfifo(struct iio_buffer *r,
92 u8 *data,
93 s64 timestamp)
94{
95 int ret;
96 struct iio_kfifo *kf = iio_to_kfifo(r);
97 ret = kfifo_in(&kf->kf, data, r->bytes_per_datum);
98 if (ret != r->bytes_per_datum)
99 return -EBUSY;
100 return 0;
101}
102
103static int iio_read_first_n_kfifo(struct iio_buffer *r,
104 size_t n, char __user *buf)
105{
106 int ret, copied;
107 struct iio_kfifo *kf = iio_to_kfifo(r);
108
109 if (n < r->bytes_per_datum)
110 return -EINVAL;
111
112 n = rounddown(n, r->bytes_per_datum);
113 ret = kfifo_to_user(&kf->kf, buf, n, &copied);
114
115 return copied;
116}
117
/* Operations vector installed on every kfifo-backed buffer */
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
	.store_to = &iio_store_to_kfifo,
	.read_first_n = &iio_read_first_n_kfifo,
	.request_update = &iio_request_update_kfifo,
	.get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo,
	.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
	.get_length = &iio_get_length_kfifo,
	.set_length = &iio_set_length_kfifo,
};
127
128struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
129{
130 struct iio_kfifo *kf;
131
132 kf = kzalloc(sizeof *kf, GFP_KERNEL);
133 if (!kf)
134 return NULL;
135 kf->update_needed = true;
136 iio_buffer_init(&kf->buffer);
137 kf->buffer.attrs = &iio_kfifo_attribute_group;
138 kf->buffer.access = &kfifo_access_funcs;
139
140 return &kf->buffer;
141}
142EXPORT_SYMBOL(iio_kfifo_allocate);
143
/* Free a buffer allocated by iio_kfifo_allocate() */
void iio_kfifo_free(struct iio_buffer *r)
{
	kfree(iio_to_kfifo(r));
}
EXPORT_SYMBOL(iio_kfifo_free);
149
150MODULE_LICENSE("GPL");