Diffstat (limited to 'drivers/iio')
-rw-r--r--  drivers/iio/industrialio-buffer.c  58
1 file changed, 45 insertions(+), 13 deletions(-)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index a24b2e005eb3..df919f44d513 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -42,18 +42,47 @@ static size_t iio_buffer_data_available(struct iio_buffer *buf)
 	return buf->access->data_available(buf);
 }
 
+static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
+				   struct iio_buffer *buf, size_t required)
+{
+	if (!indio_dev->info->hwfifo_flush_to_buffer)
+		return -ENODEV;
+
+	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
+}
+
 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
-			     size_t to_wait)
+			     size_t to_wait, int to_flush)
 {
+	size_t avail;
+	int flushed = 0;
+
 	/* wakeup if the device was unregistered */
 	if (!indio_dev->info)
 		return true;
 
 	/* drain the buffer if it was disabled */
-	if (!iio_buffer_is_active(buf))
+	if (!iio_buffer_is_active(buf)) {
 		to_wait = min_t(size_t, to_wait, 1);
+		to_flush = 0;
+	}
+
+	avail = iio_buffer_data_available(buf);
 
-	if (iio_buffer_data_available(buf) >= to_wait)
+	if (avail >= to_wait) {
+		/* force a flush for non-blocking reads */
+		if (!to_wait && !avail && to_flush)
+			iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
+		return true;
+	}
+
+	if (to_flush)
+		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
+						  to_wait - avail);
+	if (flushed <= 0)
+		return false;
+
+	if (avail + flushed >= to_wait)
 		return true;
 
 	return false;
@@ -72,6 +101,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 	struct iio_buffer *rb = indio_dev->buffer;
 	size_t datum_size;
 	size_t to_wait = 0;
+	size_t to_read;
 	int ret;
 
 	if (!indio_dev->info)
@@ -89,12 +119,14 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 	if (!datum_size)
 		return 0;
 
+	to_read = min_t(size_t, n / datum_size, rb->watermark);
+
 	if (!(filp->f_flags & O_NONBLOCK))
-		to_wait = min_t(size_t, n / datum_size, rb->watermark);
+		to_wait = to_read;
 
 	do {
 		ret = wait_event_interruptible(rb->pollq,
-			iio_buffer_ready(indio_dev, rb, to_wait));
+		      iio_buffer_ready(indio_dev, rb, to_wait, to_read));
 		if (ret)
 			return ret;
 
@@ -122,7 +154,7 @@ unsigned int iio_buffer_poll(struct file *filp,
 		return -ENODEV;
 
 	poll_wait(filp, &rb->pollq, wait);
-	if (iio_buffer_ready(indio_dev, rb, rb->watermark))
+	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
 		return POLLIN | POLLRDNORM;
 	return 0;
 }
@@ -661,19 +693,16 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
 		}
 	}
 	/* Definitely possible for devices to support both of these. */
-	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
-		if (!indio_dev->trig) {
-			printk(KERN_INFO "Buffer not started: no trigger\n");
-			ret = -EINVAL;
-			/* Can only occur on first buffer */
-			goto error_run_postdisable;
-		}
+	if ((indio_dev->modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
 		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
 	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
 		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
 	} else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
 		indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
 	} else { /* Should never be reached */
+		/* Can only occur on first buffer */
+		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
+			pr_info("Buffer not started: no trigger\n");
 		ret = -EINVAL;
 		goto error_run_postdisable;
 	}
@@ -825,6 +854,9 @@ static ssize_t iio_buffer_store_watermark(struct device *dev,
 	}
 
 	buffer->watermark = val;
+
+	if (indio_dev->info->hwfifo_set_watermark)
+		indio_dev->info->hwfifo_set_watermark(indio_dev, val);
 out:
 	mutex_unlock(&indio_dev->mlock);
 
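
For context: the diff above makes the core call two optional struct iio_info hooks. hwfifo_set_watermark is invoked when the buffer watermark is stored, and hwfifo_flush_to_buffer is invoked from iio_buffer_ready() to drain the device's hardware FIFO into the software buffer when too few samples are available (or to force a flush on a non-blocking read). The sketch below is not part of this patch; it only illustrates how a driver might wire these hooks up. The foo_* device, its register helpers, and FOO_SAMPLE_BYTES are hypothetical placeholders; only the two callback slots, their signatures as used by the core above, and iio_push_to_buffers() are real IIO interfaces.

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

/* Hypothetical low-level helpers the driver would implement elsewhere. */
int foo_write_fifo_threshold(struct iio_dev *indio_dev, unsigned int samples);
int foo_read_one_sample(struct iio_dev *indio_dev, u8 *data); /* 1 = got sample, 0 = FIFO empty, <0 = error */

#define FOO_SAMPLE_BYTES 6	/* assumed size of one scan in this example */

static int foo_hwfifo_set_watermark(struct iio_dev *indio_dev, unsigned int val)
{
	/* Mirror the software watermark into the sensor's FIFO threshold. */
	return foo_write_fifo_threshold(indio_dev, val);
}

static int foo_hwfifo_flush_to_buffer(struct iio_dev *indio_dev, unsigned int count)
{
	u8 sample[FOO_SAMPLE_BYTES];
	unsigned int i;
	int ret;

	/* Drain up to 'count' samples from the hardware FIFO into the IIO buffer. */
	for (i = 0; i < count; i++) {
		ret = foo_read_one_sample(indio_dev, sample);
		if (ret < 0)
			return i ? (int)i : ret;
		if (ret == 0)	/* hardware FIFO empty */
			break;
		iio_push_to_buffers(indio_dev, sample);
	}

	/* The core expects the number of samples actually flushed (<= 0 means none). */
	return i;
}

static const struct iio_info foo_info = {
	/* .read_raw, .write_raw, etc. omitted */
	.hwfifo_set_watermark = foo_hwfifo_set_watermark,
	.hwfifo_flush_to_buffer = foo_hwfifo_flush_to_buffer,
};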