author		Octavian Purdila <octavian.purdila@intel.com>	2015-03-22 14:33:39 -0400
committer	Jonathan Cameron <jic23@kernel.org>	2015-03-29 11:17:10 -0400
commit		f4f4673b7535eff4ee1a8cfb1685fa1e1a0cb79d (patch)
tree		c2914fbba47032f26e1c1031e3eb17c41b2c3402 /drivers/iio
parent		37d3455672732b29a477732a94abfe95e199f0ce (diff)
iio: add support for hardware fifo
Some devices have hardware buffers that can store a number of samples for later consumption. Hardware usually provides interrupts to notify the processor when the FIFO is full or when it has reached a certain watermark level. This helps reduce the number of interrupts to the host processor and thus helps decrease power consumption.

This patch enables the use of hardware FIFOs for IIO devices in conjunction with software device buffers. When the hardware FIFO is enabled, samples are stored in the hardware FIFO. The samples are later flushed to the software device buffer when the number of entries in the hardware FIFO reaches the hardware watermark, or when a flush operation is triggered by the user doing a non-blocking read on an empty software device buffer.

In order to implement hardware FIFO support, device drivers must implement the following new operations: setting and getting the hardware FIFO watermark level, and flushing the hardware FIFO to the software device buffer. The device must also expose information about the hardware FIFO, such as its minimum and maximum watermark and, if necessary, a list of supported watermark values. Finally, the device driver must activate the hardware FIFO when the device buffer is enabled, if the current device settings allow it.

The software device buffer watermark is passed by the IIO core to the device driver as a hint for the hardware FIFO watermark. The device driver can adjust this value to accommodate hardware limitations (such as capping it to the maximum hardware watermark, or adjusting it to a value the hardware supports). It can also disable the hardware watermark (and implicitly the hardware FIFO) if this value is below the minimum hardware watermark.

Since a driver may support the hardware FIFO only when not in triggered buffer mode (due to the different semantics of hardware FIFO sampling and triggered sampling), this patch changes the IIO core code to allow falling back to non-triggered buffered mode if no trigger is enabled.

Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Reviewed-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
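For context, the driver side pairs with this core change through the two struct iio_info callbacks the patch calls: hwfifo_set_watermark and hwfifo_flush_to_buffer (their signatures below follow the calls visible in this diff). What follows is a minimal, hypothetical sketch of how a driver might wire them up; struct mydev_data, the MYDEV_* limits and the mydev_* register helpers are invented for illustration, while iio_priv() and iio_push_to_buffers() are real IIO core interfaces. The watermark-range information mentioned in the log is left out of this sketch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

#define MYDEV_FIFO_WM_MIN	2	/* hypothetical hardware limits */
#define MYDEV_FIFO_WM_MAX	32

struct mydev_data {
	unsigned int watermark;
	bool hwfifo_enabled;
};

/* Low-level register helpers assumed to be provided by the driver. */
int mydev_write_fifo_wm(struct mydev_data *data, unsigned int wm);
unsigned int mydev_fifo_level(struct mydev_data *data);
int mydev_read_fifo_sample(struct mydev_data *data, u16 *sample);

static int mydev_hwfifo_set_watermark(struct iio_dev *indio_dev,
				      unsigned int val)
{
	struct mydev_data *data = iio_priv(indio_dev);

	/* The core passes the software watermark as a hint; cap it to
	 * what the hardware supports ... */
	if (val > MYDEV_FIFO_WM_MAX)
		val = MYDEV_FIFO_WM_MAX;

	/* ... and give up on the hardware FIFO below the minimum. */
	data->hwfifo_enabled = (val >= MYDEV_FIFO_WM_MIN);
	data->watermark = val;

	return mydev_write_fifo_wm(data, val);
}

static int mydev_hwfifo_flush_to_buffer(struct iio_dev *indio_dev,
					unsigned int count)
{
	struct mydev_data *data = iio_priv(indio_dev);
	u16 sample[3];	/* e.g. x/y/z of a 3-axis sensor */
	unsigned int i, n;

	/* Drain at most 'count' samples into the software buffer. */
	n = min_t(unsigned int, count, mydev_fifo_level(data));
	for (i = 0; i < n; i++) {
		int ret = mydev_read_fifo_sample(data, sample);

		if (ret < 0)
			return ret;
		iio_push_to_buffers(indio_dev, sample);
	}

	return n;	/* number of samples flushed */
}

static const struct iio_info mydev_info = {
	.driver_module		= THIS_MODULE,
	.hwfifo_set_watermark	= mydev_hwfifo_set_watermark,
	.hwfifo_flush_to_buffer	= mydev_hwfifo_flush_to_buffer,
};

Returning the number of samples actually flushed matters: iio_buffer_ready() in the diff below treats flushed <= 0 as "nothing arrived" and keeps the reader waiting.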
Diffstat (limited to 'drivers/iio')
-rw-r--r--	drivers/iio/industrialio-buffer.c	58
1 file changed, 45 insertions(+), 13 deletions(-)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index a24b2e005eb3..df919f44d513 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -42,18 +42,47 @@ static size_t iio_buffer_data_available(struct iio_buffer *buf)
 	return buf->access->data_available(buf);
 }
 
+static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
+				   struct iio_buffer *buf, size_t required)
+{
+	if (!indio_dev->info->hwfifo_flush_to_buffer)
+		return -ENODEV;
+
+	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
+}
+
 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
-			     size_t to_wait)
+			     size_t to_wait, int to_flush)
 {
+	size_t avail;
+	int flushed = 0;
+
 	/* wakeup if the device was unregistered */
 	if (!indio_dev->info)
 		return true;
 
 	/* drain the buffer if it was disabled */
-	if (!iio_buffer_is_active(buf))
+	if (!iio_buffer_is_active(buf)) {
 		to_wait = min_t(size_t, to_wait, 1);
+		to_flush = 0;
+	}
+
+	avail = iio_buffer_data_available(buf);
 
-	if (iio_buffer_data_available(buf) >= to_wait)
+	if (avail >= to_wait) {
+		/* force a flush for non-blocking reads */
+		if (!to_wait && !avail && to_flush)
+			iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
+		return true;
+	}
+
+	if (to_flush)
+		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
+						  to_wait - avail);
+	if (flushed <= 0)
+		return false;
+
+	if (avail + flushed >= to_wait)
 		return true;
 
 	return false;
@@ -72,6 +101,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 	struct iio_buffer *rb = indio_dev->buffer;
 	size_t datum_size;
 	size_t to_wait = 0;
+	size_t to_read;
 	int ret;
 
 	if (!indio_dev->info)
@@ -89,12 +119,14 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 	if (!datum_size)
 		return 0;
 
+	to_read = min_t(size_t, n / datum_size, rb->watermark);
+
 	if (!(filp->f_flags & O_NONBLOCK))
-		to_wait = min_t(size_t, n / datum_size, rb->watermark);
+		to_wait = to_read;
 
 	do {
 		ret = wait_event_interruptible(rb->pollq,
-			iio_buffer_ready(indio_dev, rb, to_wait));
+		      iio_buffer_ready(indio_dev, rb, to_wait, to_read));
 		if (ret)
 			return ret;
 
@@ -122,7 +154,7 @@ unsigned int iio_buffer_poll(struct file *filp,
 		return -ENODEV;
 
 	poll_wait(filp, &rb->pollq, wait);
-	if (iio_buffer_ready(indio_dev, rb, rb->watermark))
+	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
 		return POLLIN | POLLRDNORM;
 	return 0;
 }
@@ -661,19 +693,16 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
 		}
 	}
 	/* Definitely possible for devices to support both of these. */
-	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
-		if (!indio_dev->trig) {
-			printk(KERN_INFO "Buffer not started: no trigger\n");
-			ret = -EINVAL;
-			/* Can only occur on first buffer */
-			goto error_run_postdisable;
-		}
+	if ((indio_dev->modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
 		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
 	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
 		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
 	} else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
 		indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
 	} else { /* Should never be reached */
+		/* Can only occur on first buffer */
+		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
+			pr_info("Buffer not started: no trigger\n");
 		ret = -EINVAL;
 		goto error_run_postdisable;
 	}
@@ -825,6 +854,9 @@ static ssize_t iio_buffer_store_watermark(struct device *dev,
 	}
 
 	buffer->watermark = val;
+
+	if (indio_dev->info->hwfifo_set_watermark)
+		indio_dev->info->hwfifo_set_watermark(indio_dev, val);
 out:
 	mutex_unlock(&indio_dev->mlock);
 
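Finally, a hypothetical userspace sketch of the behaviour the log describes: with O_NONBLOCK, a read on an empty software buffer forces a hardware FIFO flush. The device paths are examples and assume the watermark and buffer were configured beforehand via sysfs.

/*
 * Assumed prior setup, for example:
 *   echo 64 > /sys/bus/iio/devices/iio:device0/buffer/watermark
 *   echo 1  > /sys/bus/iio/devices/iio:device0/buffer/enable
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/iio:device0", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* With O_NONBLOCK and an empty software buffer, the core asks
	 * the driver to flush, so samples parked in the hardware FIFO
	 * are drained instead of waiting for the watermark interrupt. */
	n = read(fd, buf, sizeof(buf));
	if (n < 0)
		perror("read");	/* EAGAIN if nothing could be flushed */
	else
		printf("read %zd bytes\n", n);

	close(fd);
	return 0;
}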