diff options
author | Lars-Peter Clausen <lars@metafoo.de> | 2012-01-03 08:59:40 -0500 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-02-09 13:05:06 -0500 |
commit | 43ba1100af11f34cc67bdf6b359667cfa851e6a8 (patch) | |
tree | ab8d9da8dc01a5a99f7b460b85fc0d0a94690cf6 /drivers/staging/iio | |
parent | 2c00193fa15dc51566dc7931fe32184c99c6b317 (diff) |
staging:iio:events: Use waitqueue lock to protect event queue
Use the waitqueue lock to protect the event queue instead of a custom mutex.
This has the advantage that we can call the waitqueue operations with the lock
held, which simplifies the code flow a bit.
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Acked-by: Jonathan Cameron <jic23@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/staging/iio')
-rw-r--r-- | drivers/staging/iio/industrialio-event.c | 43 |
1 file changed, 18 insertions, 25 deletions
diff --git a/drivers/staging/iio/industrialio-event.c b/drivers/staging/iio/industrialio-event.c index 335b615bd16..f0c41f1f505 100644 --- a/drivers/staging/iio/industrialio-event.c +++ b/drivers/staging/iio/industrialio-event.c | |||
@@ -35,7 +35,6 @@ | |||
35 | */ | 35 | */ |
36 | struct iio_event_interface { | 36 | struct iio_event_interface { |
37 | wait_queue_head_t wait; | 37 | wait_queue_head_t wait; |
38 | struct mutex event_list_lock; | ||
39 | DECLARE_KFIFO(det_events, struct iio_event_data, 16); | 38 | DECLARE_KFIFO(det_events, struct iio_event_data, 16); |
40 | 39 | ||
41 | struct list_head dev_attr_list; | 40 | struct list_head dev_attr_list; |
@@ -50,19 +49,17 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) | |||
50 | int copied; | 49 | int copied; |
51 | 50 | ||
52 | /* Does anyone care? */ | 51 | /* Does anyone care? */ |
53 | mutex_lock(&ev_int->event_list_lock); | 52 | spin_lock(&ev_int->wait.lock); |
54 | if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { | 53 | if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { |
55 | 54 | ||
56 | ev.id = ev_code; | 55 | ev.id = ev_code; |
57 | ev.timestamp = timestamp; | 56 | ev.timestamp = timestamp; |
58 | 57 | ||
59 | copied = kfifo_put(&ev_int->det_events, &ev); | 58 | copied = kfifo_put(&ev_int->det_events, &ev); |
60 | |||
61 | mutex_unlock(&ev_int->event_list_lock); | ||
62 | if (copied != 0) | 59 | if (copied != 0) |
63 | wake_up_interruptible(&ev_int->wait); | 60 | wake_up_locked(&ev_int->wait); |
64 | } else | 61 | } |
65 | mutex_unlock(&ev_int->event_list_lock); | 62 | spin_unlock(&ev_int->wait.lock); |
66 | 63 | ||
67 | return 0; | 64 | return 0; |
68 | } | 65 | } |
@@ -80,28 +77,25 @@ static ssize_t iio_event_chrdev_read(struct file *filep, | |||
80 | if (count < sizeof(struct iio_event_data)) | 77 | if (count < sizeof(struct iio_event_data)) |
81 | return -EINVAL; | 78 | return -EINVAL; |
82 | 79 | ||
83 | mutex_lock(&ev_int->event_list_lock); | 80 | spin_lock(&ev_int->wait.lock); |
84 | if (kfifo_is_empty(&ev_int->det_events)) { | 81 | if (kfifo_is_empty(&ev_int->det_events)) { |
85 | if (filep->f_flags & O_NONBLOCK) { | 82 | if (filep->f_flags & O_NONBLOCK) { |
86 | ret = -EAGAIN; | 83 | ret = -EAGAIN; |
87 | goto error_mutex_unlock; | 84 | goto error_unlock; |
88 | } | 85 | } |
89 | mutex_unlock(&ev_int->event_list_lock); | ||
90 | /* Blocking on device; waiting for something to be there */ | 86 | /* Blocking on device; waiting for something to be there */ |
91 | ret = wait_event_interruptible(ev_int->wait, | 87 | ret = wait_event_interruptible_locked(ev_int->wait, |
92 | !kfifo_is_empty(&ev_int->det_events)); | 88 | !kfifo_is_empty(&ev_int->det_events)); |
93 | if (ret) | 89 | if (ret) |
94 | goto error_ret; | 90 | goto error_unlock; |
95 | /* Single access device so no one else can get the data */ | 91 | /* Single access device so no one else can get the data */ |
96 | mutex_lock(&ev_int->event_list_lock); | ||
97 | } | 92 | } |
98 | 93 | ||
99 | mutex_unlock(&ev_int->event_list_lock); | ||
100 | ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied); | 94 | ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied); |
101 | 95 | ||
102 | error_mutex_unlock: | 96 | error_unlock: |
103 | mutex_unlock(&ev_int->event_list_lock); | 97 | spin_unlock(&ev_int->wait.lock); |
104 | error_ret: | 98 | |
105 | return ret ? ret : copied; | 99 | return ret ? ret : copied; |
106 | } | 100 | } |
107 | 101 | ||
@@ -109,7 +103,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep) | |||
109 | { | 103 | { |
110 | struct iio_event_interface *ev_int = filep->private_data; | 104 | struct iio_event_interface *ev_int = filep->private_data; |
111 | 105 | ||
112 | mutex_lock(&ev_int->event_list_lock); | 106 | spin_lock(&ev_int->wait.lock); |
113 | clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); | 107 | clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); |
114 | /* | 108 | /* |
115 | * In order to maintain a clean state for reopening, | 109 | * In order to maintain a clean state for reopening, |
@@ -117,7 +111,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep) | |||
117 | * any new __iio_push_event calls running. | 111 | * any new __iio_push_event calls running. |
118 | */ | 112 | */ |
119 | kfifo_reset_out(&ev_int->det_events); | 113 | kfifo_reset_out(&ev_int->det_events); |
120 | mutex_unlock(&ev_int->event_list_lock); | 114 | spin_unlock(&ev_int->wait.lock); |
121 | 115 | ||
122 | return 0; | 116 | return 0; |
123 | } | 117 | } |
@@ -137,18 +131,18 @@ int iio_event_getfd(struct iio_dev *indio_dev) | |||
137 | if (ev_int == NULL) | 131 | if (ev_int == NULL) |
138 | return -ENODEV; | 132 | return -ENODEV; |
139 | 133 | ||
140 | mutex_lock(&ev_int->event_list_lock); | 134 | spin_lock(&ev_int->wait.lock); |
141 | if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { | 135 | if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { |
142 | mutex_unlock(&ev_int->event_list_lock); | 136 | spin_unlock(&ev_int->wait.lock); |
143 | return -EBUSY; | 137 | return -EBUSY; |
144 | } | 138 | } |
145 | mutex_unlock(&ev_int->event_list_lock); | 139 | spin_unlock(&ev_int->wait.lock); |
146 | fd = anon_inode_getfd("iio:event", | 140 | fd = anon_inode_getfd("iio:event", |
147 | &iio_event_chrdev_fileops, ev_int, O_RDONLY); | 141 | &iio_event_chrdev_fileops, ev_int, O_RDONLY); |
148 | if (fd < 0) { | 142 | if (fd < 0) { |
149 | mutex_lock(&ev_int->event_list_lock); | 143 | spin_lock(&ev_int->wait.lock); |
150 | clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); | 144 | clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); |
151 | mutex_unlock(&ev_int->event_list_lock); | 145 | spin_unlock(&ev_int->wait.lock); |
152 | } | 146 | } |
153 | return fd; | 147 | return fd; |
154 | } | 148 | } |
@@ -360,7 +354,6 @@ static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev) | |||
360 | 354 | ||
361 | static void iio_setup_ev_int(struct iio_event_interface *ev_int) | 355 | static void iio_setup_ev_int(struct iio_event_interface *ev_int) |
362 | { | 356 | { |
363 | mutex_init(&ev_int->event_list_lock); | ||
364 | INIT_KFIFO(ev_int->det_events); | 357 | INIT_KFIFO(ev_int->det_events); |
365 | init_waitqueue_head(&ev_int->wait); | 358 | init_waitqueue_head(&ev_int->wait); |
366 | } | 359 | } |