author    Lars-Peter Clausen <lars@metafoo.de>    2014-02-14 13:49:00 -0500
committer Jonathan Cameron <jic23@kernel.org>    2014-02-23 10:53:25 -0500
commit    b91accafbb1031b80d22ad83576877ff2f8b4774
tree      9dd4d0974af521bdc8a6f727526b2e30b8fcc53a /drivers/iio
parent    931878405b869093c90d57a0a34f0c2b3641c4ea
iio:event: Fix and cleanup locking
The event code currently holds a spinlock with IRQs disabled while calling kfifo_to_user(). kfifo_to_user() can generate a page fault though, which means we have to be able to sleep, and sleeping is not possible while interrupts are disabled.

The good thing is that kfifo handles concurrent read and write access just fine as long as there is only one reader and one writer, so we do not need any locking to protect the reader against the writer. It is possible though that userspace tries to read from the event FIFO from multiple concurrent threads, so we need to add locking to protect against this. This is done using a mutex. The mutex only protects the kfifo_to_user() call; it does not protect the waitqueue. This means that multiple threads can be waiting for new data, and once a new event is added to the FIFO all waiting threads will be woken up. If one of those threads is unable to read any data (because another thread already read all of it) it goes back to sleep.

The only remaining issue is that, now that the clearing of the BUSY flag and the emptying of the FIFO no longer happen in one atomic step, an event can be added to the FIFO after it has been emptied, and that stale event would be visible the next time a new event file descriptor is created. To avoid this, move the emptying of the FIFO from iio_event_chrdev_release() to iio_event_getfd().

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
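[Editor's note: the wake-all-then-retry scheme described above can be distilled into a minimal userspace sketch. The following is an illustration only, not the kernel code (which follows in the diff below): every name here (event_fifo, fifo_push, fifo_pop, fifo_empty, FIFO_SIZE) is invented for the example, a pthread condition variable stands in for the waitqueue, a pthread mutex stands in for read_lock, and a single writer that never overruns the buffer is assumed.]

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define FIFO_SIZE 16u

struct event_fifo {
        uint64_t buf[FIFO_SIZE];
        atomic_uint head;            /* advanced only by the single writer */
        atomic_uint tail;            /* advanced by readers, under read_lock */
        pthread_mutex_t read_lock;   /* serializes concurrent readers */
        pthread_mutex_t wait_lock;   /* guards the condvar handshake */
        pthread_cond_t wait;         /* the "waitqueue": all sleepers woken */
};

static bool fifo_empty(struct event_fifo *f)
{
        return atomic_load(&f->head) == atomic_load(&f->tail);
}

/* Writer: add one event (assumes the FIFO never fills) and wake every
 * sleeping reader, as wake_up_poll() does in the patch. */
static void fifo_push(struct event_fifo *f, uint64_t ev)
{
        f->buf[atomic_load(&f->head) % FIFO_SIZE] = ev;
        atomic_fetch_add(&f->head, 1);
        pthread_mutex_lock(&f->wait_lock);
        pthread_cond_broadcast(&f->wait);
        pthread_mutex_unlock(&f->wait_lock);
}

/* Reader: nothing is held while sleeping, and only the copy-out itself
 * is serialized, so the copy is free to block (the kernel analogue:
 * a page fault inside kfifo_to_user()). */
static uint64_t fifo_pop(struct event_fifo *f)
{
        uint64_t ev = 0;
        bool copied = false;

        do {
                /* Sleep until data shows up (wait_event_interruptible). */
                pthread_mutex_lock(&f->wait_lock);
                while (fifo_empty(f))
                        pthread_cond_wait(&f->wait, &f->wait_lock);
                pthread_mutex_unlock(&f->wait_lock);

                /* Recheck under the reader lock: we may have lost the
                 * race against another woken reader. */
                pthread_mutex_lock(&f->read_lock);
                if (!fifo_empty(f)) {
                        ev = f->buf[atomic_load(&f->tail) % FIFO_SIZE];
                        atomic_fetch_add(&f->tail, 1);
                        copied = true;
                }
                pthread_mutex_unlock(&f->read_lock);

                /* A faster reader drained the FIFO: go back to sleep. */
        } while (!copied);

        return ev;
}

int main(void)
{
        static struct event_fifo f = {
                .read_lock = PTHREAD_MUTEX_INITIALIZER,
                .wait_lock = PTHREAD_MUTEX_INITIALIZER,
                .wait = PTHREAD_COND_INITIALIZER,
        };

        fifo_push(&f, 42);
        printf("event: %" PRIu64 "\n", fifo_pop(&f));
        return 0;
}

The key property is visible in fifo_pop(): a thread woken by the broadcast is not guaranteed any data, so finding the FIFO empty after waking is normal and simply means another reader won the race. This is exactly why the patched iio_event_chrdev_read() below loops until copied != 0, returning -EAGAIN instead of looping for non-blocking descriptors.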
Diffstat (limited to 'drivers/iio')
 drivers/iio/industrialio-event.c | 83 ++++++++++++++++++++---------------------
 1 file changed, 41 insertions(+), 42 deletions(-)
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 2e6f8e026fab..ea6e06b9c7d4 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -40,6 +40,7 @@ struct iio_event_interface {
         struct list_head        dev_attr_list;
         unsigned long           flags;
         struct attribute_group  group;
+        struct mutex            read_lock;
 };
 
 /**
@@ -47,16 +48,17 @@ struct iio_event_interface {
  * @indio_dev:          IIO device structure
  * @ev_code:            What event
  * @timestamp:          When the event occurred
+ *
+ * Note: The caller must make sure that this function is not running
+ * concurrently for the same indio_dev more than once.
  **/
 int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
 {
         struct iio_event_interface *ev_int = indio_dev->event_interface;
         struct iio_event_data ev;
-        unsigned long flags;
         int copied;
 
         /* Does anyone care? */
-        spin_lock_irqsave(&ev_int->wait.lock, flags);
         if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
 
                 ev.id = ev_code;
@@ -64,9 +66,8 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
 
                 copied = kfifo_put(&ev_int->det_events, ev);
                 if (copied != 0)
-                        wake_up_locked_poll(&ev_int->wait, POLLIN);
+                        wake_up_poll(&ev_int->wait, POLLIN);
         }
-        spin_unlock_irqrestore(&ev_int->wait.lock, flags);
 
         return 0;
 }
@@ -87,10 +88,8 @@ static unsigned int iio_event_poll(struct file *filep,
 
         poll_wait(filep, &ev_int->wait, wait);
 
-        spin_lock_irq(&ev_int->wait.lock);
         if (!kfifo_is_empty(&ev_int->det_events))
                 events = POLLIN | POLLRDNORM;
-        spin_unlock_irq(&ev_int->wait.lock);
 
         return events;
 }
@@ -111,31 +110,40 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
         if (count < sizeof(struct iio_event_data))
                 return -EINVAL;
 
-        spin_lock_irq(&ev_int->wait.lock);
-        if (kfifo_is_empty(&ev_int->det_events)) {
-                if (filep->f_flags & O_NONBLOCK) {
-                        ret = -EAGAIN;
-                        goto error_unlock;
-                }
-                /* Blocking on device; waiting for something to be there */
-                ret = wait_event_interruptible_locked_irq(ev_int->wait,
+        do {
+                if (kfifo_is_empty(&ev_int->det_events)) {
+                        if (filep->f_flags & O_NONBLOCK)
+                                return -EAGAIN;
+
+                        ret = wait_event_interruptible(ev_int->wait,
                                         !kfifo_is_empty(&ev_int->det_events) ||
                                         indio_dev->info == NULL);
-                if (ret)
-                        goto error_unlock;
-                if (indio_dev->info == NULL) {
-                        ret = -ENODEV;
-                        goto error_unlock;
+                        if (ret)
+                                return ret;
+                        if (indio_dev->info == NULL)
+                                return -ENODEV;
                 }
-                /* Single access device so no one else can get the data */
-        }
 
-        ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+                if (mutex_lock_interruptible(&ev_int->read_lock))
+                        return -ERESTARTSYS;
+                ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+                mutex_unlock(&ev_int->read_lock);
+
+                if (ret)
+                        return ret;
+
+                /*
+                 * If we couldn't read anything from the fifo (a different
+                 * thread might have been faster) we either return -EAGAIN if
+                 * the file descriptor is non-blocking, otherwise we go back to
+                 * sleep and wait for more data to arrive.
+                 */
+                if (copied == 0 && (filep->f_flags & O_NONBLOCK))
+                        return -EAGAIN;
 
-error_unlock:
-        spin_unlock_irq(&ev_int->wait.lock);
+        } while (copied == 0);
 
-        return ret ? ret : copied;
+        return copied;
 }
 
 static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
@@ -143,15 +151,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
         struct iio_dev *indio_dev = filep->private_data;
         struct iio_event_interface *ev_int = indio_dev->event_interface;
 
-        spin_lock_irq(&ev_int->wait.lock);
-        __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
-        /*
-         * In order to maintain a clean state for reopening,
-         * clear out any awaiting events. The mask will prevent
-         * any new __iio_push_event calls running.
-         */
-        kfifo_reset_out(&ev_int->det_events);
-        spin_unlock_irq(&ev_int->wait.lock);
+        clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
 
         iio_device_put(indio_dev);
 
@@ -174,22 +174,20 @@ int iio_event_getfd(struct iio_dev *indio_dev)
         if (ev_int == NULL)
                 return -ENODEV;
 
-        spin_lock_irq(&ev_int->wait.lock);
-        if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
-                spin_unlock_irq(&ev_int->wait.lock);
+        if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags))
                 return -EBUSY;
-        }
-        spin_unlock_irq(&ev_int->wait.lock);
+
         iio_device_get(indio_dev);
 
         fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
                                 indio_dev, O_RDONLY | O_CLOEXEC);
         if (fd < 0) {
-                spin_lock_irq(&ev_int->wait.lock);
-                __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
-                spin_unlock_irq(&ev_int->wait.lock);
+                clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
                 iio_device_put(indio_dev);
+        } else {
+                kfifo_reset_out(&ev_int->det_events);
         }
+
         return fd;
 }
 
@@ -424,6 +422,7 @@ static void iio_setup_ev_int(struct iio_event_interface *ev_int)
 {
         INIT_KFIFO(ev_int->det_events);
         init_waitqueue_head(&ev_int->wait);
+        mutex_init(&ev_int->read_lock);
 }
 
 static const char *iio_event_group_name = "events";