author		Josselin Costanzi <josselin.costanzi@mobile-devices.fr>	2015-03-22 14:33:38 -0400
committer	Jonathan Cameron <jic23@kernel.org>	2015-03-29 11:17:09 -0400
commit		37d3455672732b29a477732a94abfe95e199f0ce (patch)
tree		a2e4920bbecaec6f3773cbc5a7228d17283b7d21 /drivers/iio
parent		9444a300c2be3ce6266462e3171ceb6636cc62e8 (diff)
iio: add watermark logic to iio read and poll
Currently the IIO buffer blocking read only waits until at least one
data element is available.

This patch makes the reader sleep until enough data is collected before
returning to userspace. This should limit the number of read() calls
when trying to get data in batches.

Co-author: Yannick Bedhomme <yannick.bedhomme@mobile-devices.fr>
Signed-off-by: Josselin Costanzi <josselin.costanzi@mobile-devices.fr>
[rebased and remove buffer timeout]
Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Reviewed-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
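A minimal userspace sketch of the resulting watermark semantics,
assuming a hypothetical iio:device0 with a 16-byte scan and an
already-enabled buffer (the device path, DATUM_SIZE and watermark
value are assumptions for illustration, not values from this patch):

    /*
     * Hypothetical example: batch blocking reads via the new
     * buffer/watermark sysfs attribute. Before running, set the
     * watermark from the shell (path assumed):
     *
     *   echo 64 > /sys/bus/iio/devices/iio:device0/buffer/watermark
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define DATUM_SIZE 16  /* bytes per scan, device specific (assumed) */
    #define BATCH      64  /* matches the watermark set above */

    int main(void)
    {
            char data[DATUM_SIZE * BATCH];
            ssize_t ret;
            int fd;

            fd = open("/dev/iio:device0", O_RDONLY);
            if (fd < 0)
                    return 1;

            /*
             * Blocking read: with this patch it sleeps until
             * min(BATCH, watermark) samples are buffered, instead of
             * waking userspace for every single sample.
             */
            ret = read(fd, data, sizeof(data));
            if (ret > 0)
                    printf("read %zd samples\n", ret / DATUM_SIZE);

            close(fd);
            return 0;
    }

Non-blocking readers are unaffected: with O_NONBLOCK the watermark is
ignored (to_wait stays 0) and read() still returns -EAGAIN when the
buffer is empty.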
Diffstat (limited to 'drivers/iio')
-rw-r--r--	drivers/iio/industrialio-buffer.c	120
-rw-r--r--	drivers/iio/kfifo_buf.c			11
2 files changed, 109 insertions(+), 22 deletions(-)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index c2d5440aa226..a24b2e005eb3 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -37,11 +37,28 @@ static bool iio_buffer_is_active(struct iio_buffer *buf)
 	return !list_empty(&buf->buffer_list);
 }
 
-static bool iio_buffer_data_available(struct iio_buffer *buf)
+static size_t iio_buffer_data_available(struct iio_buffer *buf)
 {
 	return buf->access->data_available(buf);
 }
 
+static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
+			     size_t to_wait)
+{
+	/* wakeup if the device was unregistered */
+	if (!indio_dev->info)
+		return true;
+
+	/* drain the buffer if it was disabled */
+	if (!iio_buffer_is_active(buf))
+		to_wait = min_t(size_t, to_wait, 1);
+
+	if (iio_buffer_data_available(buf) >= to_wait)
+		return true;
+
+	return false;
+}
+
 /**
  * iio_buffer_read_first_n_outer() - chrdev read for buffer access
  *
@@ -53,6 +70,8 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 {
 	struct iio_dev *indio_dev = filp->private_data;
 	struct iio_buffer *rb = indio_dev->buffer;
+	size_t datum_size;
+	size_t to_wait = 0;
 	int ret;
 
 	if (!indio_dev->info)
@@ -61,19 +80,26 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 	if (!rb || !rb->access->read_first_n)
 		return -EINVAL;
 
+	datum_size = rb->bytes_per_datum;
+
+	/*
+	 * If datum_size is 0 there will never be anything to read from the
+	 * buffer, so signal end of file now.
+	 */
+	if (!datum_size)
+		return 0;
+
+	if (!(filp->f_flags & O_NONBLOCK))
+		to_wait = min_t(size_t, n / datum_size, rb->watermark);
+
 	do {
-		if (!iio_buffer_data_available(rb)) {
-			if (filp->f_flags & O_NONBLOCK)
-				return -EAGAIN;
+		ret = wait_event_interruptible(rb->pollq,
+		      iio_buffer_ready(indio_dev, rb, to_wait));
+		if (ret)
+			return ret;
 
-			ret = wait_event_interruptible(rb->pollq,
-					iio_buffer_data_available(rb) ||
-					indio_dev->info == NULL);
-			if (ret)
-				return ret;
-			if (indio_dev->info == NULL)
-				return -ENODEV;
-		}
+		if (!indio_dev->info)
+			return -ENODEV;
 
 		ret = rb->access->read_first_n(rb, n, buf);
 		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
@@ -96,9 +122,8 @@ unsigned int iio_buffer_poll(struct file *filp,
 		return -ENODEV;
 
 	poll_wait(filp, &rb->pollq, wait);
-	if (iio_buffer_data_available(rb))
+	if (iio_buffer_ready(indio_dev, rb, rb->watermark))
 		return POLLIN | POLLRDNORM;
-	/* need a way of knowing if there may be enough data... */
 	return 0;
 }
 
@@ -123,6 +148,7 @@ void iio_buffer_init(struct iio_buffer *buffer)
 	INIT_LIST_HEAD(&buffer->buffer_list);
 	init_waitqueue_head(&buffer->pollq);
 	kref_init(&buffer->ref);
+	buffer->watermark = 1;
 }
 EXPORT_SYMBOL(iio_buffer_init);
 
@@ -416,6 +442,11 @@ static ssize_t iio_buffer_write_length(struct device *dev,
 		buffer->access->set_length(buffer, val);
 		ret = 0;
 	}
+	if (ret)
+		goto out;
+	if (buffer->length && buffer->length < buffer->watermark)
+		buffer->watermark = buffer->length;
+out:
 	mutex_unlock(&indio_dev->mlock);
 
 	return ret ? ret : len;
@@ -472,6 +503,7 @@ static void iio_buffer_activate(struct iio_dev *indio_dev,
 static void iio_buffer_deactivate(struct iio_buffer *buffer)
 {
 	list_del_init(&buffer->buffer_list);
+	wake_up_interruptible(&buffer->pollq);
 	iio_buffer_put(buffer);
 }
 
@@ -754,16 +786,64 @@ done:
 
 static const char * const iio_scan_elements_group_name = "scan_elements";
 
+static ssize_t iio_buffer_show_watermark(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
+
+	return sprintf(buf, "%u\n", buffer->watermark);
+}
+
+static ssize_t iio_buffer_store_watermark(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
+	unsigned int val;
+	int ret;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret)
+		return ret;
+	if (!val)
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+
+	if (val > buffer->length) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (iio_buffer_is_active(indio_dev->buffer)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	buffer->watermark = val;
+out:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+
 static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
 		   iio_buffer_write_length);
 static struct device_attribute dev_attr_length_ro = __ATTR(length,
 	S_IRUGO, iio_buffer_read_length, NULL);
 static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
 		   iio_buffer_show_enable, iio_buffer_store_enable);
+static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
+		   iio_buffer_show_watermark, iio_buffer_store_watermark);
 
 static struct attribute *iio_buffer_attrs[] = {
 	&dev_attr_length.attr,
 	&dev_attr_enable.attr,
+	&dev_attr_watermark.attr,
 };
 
 int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
@@ -944,8 +1024,18 @@ static const void *iio_demux(struct iio_buffer *buffer,
 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
 {
 	const void *dataout = iio_demux(buffer, data);
+	int ret;
+
+	ret = buffer->access->store_to(buffer, dataout);
+	if (ret)
+		return ret;
 
-	return buffer->access->store_to(buffer, dataout);
+	/*
+	 * We can't just test for watermark to decide if we wake the poll queue
+	 * because read may request less samples than the watermark.
+	 */
+	wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
+	return 0;
 }
 
 static void iio_buffer_demux_free(struct iio_buffer *buffer)
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
index b2beea01c49b..847ca561afe0 100644
--- a/drivers/iio/kfifo_buf.c
+++ b/drivers/iio/kfifo_buf.c
@@ -83,9 +83,6 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
 	ret = kfifo_in(&kf->kf, data, 1);
 	if (ret != 1)
 		return -EBUSY;
-
-	wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);
-
 	return 0;
 }
 
@@ -109,16 +106,16 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
 	return copied;
 }
 
-static bool iio_kfifo_buf_data_available(struct iio_buffer *r)
+static size_t iio_kfifo_buf_data_available(struct iio_buffer *r)
 {
 	struct iio_kfifo *kf = iio_to_kfifo(r);
-	bool empty;
+	size_t samples;
 
 	mutex_lock(&kf->user_lock);
-	empty = kfifo_is_empty(&kf->kf);
+	samples = kfifo_len(&kf->kf);
 	mutex_unlock(&kf->user_lock);
 
-	return !empty;
+	return samples;
 }
 
 static void iio_kfifo_buffer_release(struct iio_buffer *buffer)