author	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2010-11-15 04:39:57 -0500
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2010-11-18 03:25:45 -0500
commit	8ee294cd9def0004887da7f44b80563493b0a097 (patch)
tree	b1c7ff0136fa2c359fdf6898a185921dd5a5db92 /drivers/input/serio/serio.c
parent	ce16a474f6305dd631c885ba970d5746e4d5c803 (diff)
Input: serio - convert to common workqueue instead of a thread
Instead of creating an exclusive thread to handle serio events (which
happen rarely), let's switch to using the common workqueue. With the
arrival of the concurrency-managed workqueue infrastructure we no longer
need to worry about our callers or callees also using the workqueue
(no deadlocks anymore), and the change reduces the total number of
threads in the system.
Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
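[Editor's note] To make the shape of this conversion concrete, here is a minimal sketch of the same pattern outside of serio: a driver-private kthread that sleeps on a wait queue becomes a statically declared work item that producers schedule onto the shared workqueue. All demo_* names are illustrative inventions for this note, not serio code; the actual serio changes are in the diff below.

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(demo_event_lock);	/* protects demo_event_list */
static LIST_HEAD(demo_event_list);

struct demo_event {
	struct list_head node;
	void *object;
	int type;
};

/* Runs on the shared workqueue; drains the list one event at a time. */
static void demo_handle_events(struct work_struct *work)
{
	struct demo_event *event;
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&demo_event_lock, flags);
		if (list_empty(&demo_event_list)) {
			spin_unlock_irqrestore(&demo_event_lock, flags);
			break;
		}
		event = list_first_entry(&demo_event_list,
					 struct demo_event, node);
		list_del_init(&event->node);
		spin_unlock_irqrestore(&demo_event_lock, flags);

		/* ... process the event ... */
		kfree(event);
	}
}

/* One statically allocated work item replaces the kthread + wait queue. */
static DECLARE_WORK(demo_event_work, demo_handle_events);

static int demo_queue_event(void *object, int type)
{
	struct demo_event *event;
	unsigned long flags;

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

	event->object = object;
	event->type = type;

	spin_lock_irqsave(&demo_event_lock, flags);
	list_add_tail(&event->node, &demo_event_list);
	/* Replaces wake_up(): schedule_work() is safe in atomic context. */
	schedule_work(&demo_event_work);
	spin_unlock_irqrestore(&demo_event_lock, flags);

	return 0;
}

static void demo_exit(void)
{
	/* Replaces kthread_stop(): wait for any queued work to finish. */
	cancel_work_sync(&demo_event_work);
}

The property the commit message relies on is that, with concurrency-managed workqueues, a work item that blocks (for example on serio_mutex) no longer stalls unrelated work items, so sharing the system workqueue no longer risks the deadlocks that once motivated dedicated threads.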
Diffstat (limited to 'drivers/input/serio/serio.c')
-rw-r--r--	drivers/input/serio/serio.c | 155
1 file changed, 69 insertions(+), 86 deletions(-)
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 405bf214527c..db5b0bca1a1a 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -32,10 +32,9 @@
 #include <linux/module.h>
 #include <linux/serio.h>
 #include <linux/errno.h>
-#include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
 #include <linux/mutex.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
@@ -44,7 +43,7 @@ MODULE_LICENSE("GPL");
 
 /*
  * serio_mutex protects entire serio subsystem and is taken every time
- * serio port or driver registrered or unregistered.
+ * serio port or driver registered or unregistered.
  */
 static DEFINE_MUTEX(serio_mutex);
 
@@ -165,58 +164,22 @@ struct serio_event {
 
 static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
 static LIST_HEAD(serio_event_list);
-static DECLARE_WAIT_QUEUE_HEAD(serio_wait);
-static struct task_struct *serio_task;
 
-static int serio_queue_event(void *object, struct module *owner,
-			     enum serio_event_type event_type)
+static struct serio_event *serio_get_event(void)
 {
+	struct serio_event *event = NULL;
 	unsigned long flags;
-	struct serio_event *event;
-	int retval = 0;
 
 	spin_lock_irqsave(&serio_event_lock, flags);
 
-	/*
-	 * Scan event list for the other events for the same serio port,
-	 * starting with the most recent one. If event is the same we
-	 * do not need add new one. If event is of different type we
-	 * need to add this event and should not look further because
-	 * we need to preseve sequence of distinct events.
-	 */
-	list_for_each_entry_reverse(event, &serio_event_list, node) {
-		if (event->object == object) {
-			if (event->type == event_type)
-				goto out;
-			break;
-		}
-	}
-
-	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
-	if (!event) {
-		pr_err("Not enough memory to queue event %d\n", event_type);
-		retval = -ENOMEM;
-		goto out;
-	}
-
-	if (!try_module_get(owner)) {
-		pr_warning("Can't get module reference, dropping event %d\n",
-			   event_type);
-		kfree(event);
-		retval = -EINVAL;
-		goto out;
+	if (!list_empty(&serio_event_list)) {
+		event = list_first_entry(&serio_event_list,
+					 struct serio_event, node);
+		list_del_init(&event->node);
 	}
 
-	event->type = event_type;
-	event->object = object;
-	event->owner = owner;
-
-	list_add_tail(&event->node, &serio_event_list);
-	wake_up(&serio_wait);
-
-out:
 	spin_unlock_irqrestore(&serio_event_lock, flags);
-	return retval;
+	return event;
 }
 
 static void serio_free_event(struct serio_event *event)
@@ -250,25 +213,7 @@ static void serio_remove_duplicate_events(struct serio_event *event)
 	spin_unlock_irqrestore(&serio_event_lock, flags);
 }
 
-
-static struct serio_event *serio_get_event(void)
-{
-	struct serio_event *event = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&serio_event_lock, flags);
-
-	if (!list_empty(&serio_event_list)) {
-		event = list_first_entry(&serio_event_list,
-					 struct serio_event, node);
-		list_del_init(&event->node);
-	}
-
-	spin_unlock_irqrestore(&serio_event_lock, flags);
-	return event;
-}
-
-static void serio_handle_event(void)
+static void serio_handle_event(struct work_struct *work)
 {
 	struct serio_event *event;
 
@@ -307,6 +252,59 @@ static void serio_handle_event(void)
 	mutex_unlock(&serio_mutex);
 }
 
+static DECLARE_WORK(serio_event_work, serio_handle_event);
+
+static int serio_queue_event(void *object, struct module *owner,
+			     enum serio_event_type event_type)
+{
+	unsigned long flags;
+	struct serio_event *event;
+	int retval = 0;
+
+	spin_lock_irqsave(&serio_event_lock, flags);
+
+	/*
+	 * Scan event list for the other events for the same serio port,
+	 * starting with the most recent one. If event is the same we
+	 * do not need add new one. If event is of different type we
+	 * need to add this event and should not look further because
+	 * we need to preseve sequence of distinct events.
+	 */
+	list_for_each_entry_reverse(event, &serio_event_list, node) {
+		if (event->object == object) {
+			if (event->type == event_type)
+				goto out;
+			break;
+		}
+	}
+
+	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
+	if (!event) {
+		pr_err("Not enough memory to queue event %d\n", event_type);
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (!try_module_get(owner)) {
+		pr_warning("Can't get module reference, dropping event %d\n",
+			   event_type);
+		kfree(event);
+		retval = -EINVAL;
+		goto out;
+	}
+
+	event->type = event_type;
+	event->object = object;
+	event->owner = owner;
+
+	list_add_tail(&event->node, &serio_event_list);
+	schedule_work(&serio_event_work);
+
+out:
+	spin_unlock_irqrestore(&serio_event_lock, flags);
+	return retval;
+}
+
 /*
  * Remove all events that have been submitted for a given
  * object, be it serio port or driver.
@@ -356,18 +354,6 @@ static struct serio *serio_get_pending_child(struct serio *parent)
 	return child;
 }
 
-static int serio_thread(void *nothing)
-{
-	do {
-		serio_handle_event();
-		wait_event_interruptible(serio_wait,
-			kthread_should_stop() || !list_empty(&serio_event_list));
-	} while (!kthread_should_stop());
-
-	return 0;
-}
-
-
 /*
  * Serio port operations
  */
@@ -1040,21 +1026,18 @@
 		return error;
 	}
 
-	serio_task = kthread_run(serio_thread, NULL, "kseriod");
-	if (IS_ERR(serio_task)) {
-		bus_unregister(&serio_bus);
-		error = PTR_ERR(serio_task);
-		pr_err("Failed to start kseriod, error: %d\n", error);
-		return error;
-	}
-
 	return 0;
 }
 
 static void __exit serio_exit(void)
 {
 	bus_unregister(&serio_bus);
-	kthread_stop(serio_task);
+
+	/*
+	 * There should not be any outstanding events but work may
+	 * still be scheduled so simply cancel it.
+	 */
+	cancel_work_sync(&serio_event_work);
 }
 
 subsys_initcall(serio_init);
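[Editor's note] One detail worth highlighting in the moved serio_queue_event() is the duplicate suppression it preserves: the pending list is scanned newest-first, and the new event is dropped only when the most recent pending event for the same object already has the same type; the scan deliberately stops at the first match for the object so the relative order of distinct events survives. A sketch of that scan, reusing the illustrative demo_* definitions from the example above (an assumption-laden illustration, not serio code):

/*
 * Decide whether a new event would duplicate the newest pending event
 * for this object. Caller must hold demo_event_lock; assumes the
 * demo_event list defined in the earlier sketch.
 */
static bool demo_event_is_duplicate(void *object, int type)
{
	struct demo_event *event;

	list_for_each_entry_reverse(event, &demo_event_list, node) {
		if (event->object == object) {
			/*
			 * Newest pending event for this object: a match
			 * makes the new event redundant; a mismatch means
			 * it must be queued to preserve ordering.
			 */
			return event->type == type;
		}
	}

	return false;	/* nothing pending for this object */
}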