aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/connector
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2009-02-03 02:22:04 -0500
committerDavid S. Miller <davem@davemloft.net>2009-02-03 02:22:04 -0500
commit1a5645bc901aea6f3f446888061b2b084bbf1ba6 (patch)
tree1100723a5bd190311eaac46ce6eff22bf69a9a86 /drivers/connector
parentf15fbcd7d857ca2ea20b57ba6dfe63aab89d0b8b (diff)
connector: create connector workqueue only while needed once
The netlink connector uses its own workqueue to relay the data sent from userspace to the appropriate callback. If you launch the test from Documentation/connector and change it a bit to send a high flow of data, you will see thousands of events coming to the "cqueue" workqueue by looking at the workqueue tracer. This flow of events can be sent very quickly. So, to not encumber the kevent workqueue and delay other jobs, the "cqueue" workqueue should remain. But this workqueue is pointless most of the time; it will always be created (assuming you have built it, of course) although only developers with specific needs will use it. So, to avoid this "most of the time useless task", this patch proposes to create this workqueue only when needed, once. The first jobs to be sent to connector callbacks will be sent to kevent, while the "cqueue" thread creation will be scheduled to kevent too. The following jobs will continue to be scheduled to keventd until the cqueue workqueue is created, and then the rest of the jobs will continue to perform as usual, through this dedicated workqueue. Each time I tested this patch, only the first event was sent to keventd; the rest were sent to cqueue, which had been created quickly. Also, this patch fixes some trailing whitespace in the connector files. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Acked-by: Evgeniy Polyakov <zbr@ioremap.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/connector')
-rw-r--r--drivers/connector/cn_queue.c80
-rw-r--r--drivers/connector/connector.c19
2 files changed, 79 insertions, 20 deletions
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index b6fe7e7a2c2f..c769ef269fb5 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * cn_queue.c 2 * cn_queue.c
3 * 3 *
4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 9 * the Free Software Foundation; either version 2 of the License, or
@@ -31,6 +31,48 @@
31#include <linux/connector.h> 31#include <linux/connector.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33 33
34
35/*
36 * This job is sent to the kevent workqueue.
37 * While no event is once sent to any callback, the connector workqueue
38 * is not created to avoid a useless waiting kernel task.
39 * Once the first event is received, we create this dedicated workqueue which
40 * is necessary because the flow of data can be high and we don't want
41 * to encumber keventd with that.
42 */
43static void cn_queue_create(struct work_struct *work)
44{
45 struct cn_queue_dev *dev;
46
47 dev = container_of(work, struct cn_queue_dev, wq_creation);
48
49 dev->cn_queue = create_singlethread_workqueue(dev->name);
50 /* If we fail, we will use keventd for all following connector jobs */
51 WARN_ON(!dev->cn_queue);
52}
53
54/*
55 * Queue a data sent to a callback.
56 * If the connector workqueue is already created, we queue the job on it.
57 * Otherwise, we queue the job to kevent and queue the connector workqueue
58 * creation too.
59 */
60int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
61{
62 struct cn_queue_dev *pdev = cbq->pdev;
63
64 if (likely(pdev->cn_queue))
65 return queue_work(pdev->cn_queue, work);
66
67 /* Don't create the connector workqueue twice */
68 if (atomic_inc_return(&pdev->wq_requested) == 1)
69 schedule_work(&pdev->wq_creation);
70 else
71 atomic_dec(&pdev->wq_requested);
72
73 return schedule_work(work);
74}
75
34void cn_queue_wrapper(struct work_struct *work) 76void cn_queue_wrapper(struct work_struct *work)
35{ 77{
36 struct cn_callback_entry *cbq = 78 struct cn_callback_entry *cbq =
@@ -58,14 +100,17 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc
58 snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name); 100 snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
59 memcpy(&cbq->id.id, id, sizeof(struct cb_id)); 101 memcpy(&cbq->id.id, id, sizeof(struct cb_id));
60 cbq->data.callback = callback; 102 cbq->data.callback = callback;
61 103
62 INIT_WORK(&cbq->work, &cn_queue_wrapper); 104 INIT_WORK(&cbq->work, &cn_queue_wrapper);
63 return cbq; 105 return cbq;
64} 106}
65 107
66static void cn_queue_free_callback(struct cn_callback_entry *cbq) 108static void cn_queue_free_callback(struct cn_callback_entry *cbq)
67{ 109{
68 flush_workqueue(cbq->pdev->cn_queue); 110 /* The first jobs have been sent to kevent, flush them too */
111 flush_scheduled_work();
112 if (cbq->pdev->cn_queue)
113 flush_workqueue(cbq->pdev->cn_queue);
69 114
70 kfree(cbq); 115 kfree(cbq);
71} 116}
@@ -143,14 +188,11 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
143 atomic_set(&dev->refcnt, 0); 188 atomic_set(&dev->refcnt, 0);
144 INIT_LIST_HEAD(&dev->queue_list); 189 INIT_LIST_HEAD(&dev->queue_list);
145 spin_lock_init(&dev->queue_lock); 190 spin_lock_init(&dev->queue_lock);
191 init_waitqueue_head(&dev->wq_created);
146 192
147 dev->nls = nls; 193 dev->nls = nls;
148 194
149 dev->cn_queue = create_singlethread_workqueue(dev->name); 195 INIT_WORK(&dev->wq_creation, cn_queue_create);
150 if (!dev->cn_queue) {
151 kfree(dev);
152 return NULL;
153 }
154 196
155 return dev; 197 return dev;
156} 198}
@@ -158,9 +200,25 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
158void cn_queue_free_dev(struct cn_queue_dev *dev) 200void cn_queue_free_dev(struct cn_queue_dev *dev)
159{ 201{
160 struct cn_callback_entry *cbq, *n; 202 struct cn_callback_entry *cbq, *n;
203 long timeout;
204 DEFINE_WAIT(wait);
205
206 /* Flush the first pending jobs queued on kevent */
207 flush_scheduled_work();
208
209 /* If the connector workqueue creation is still pending, wait for it */
210 prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
211 if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
212 timeout = schedule_timeout(HZ * 2);
213 if (!timeout && !dev->cn_queue)
214 WARN_ON(1);
215 }
216 finish_wait(&dev->wq_created, &wait);
161 217
162 flush_workqueue(dev->cn_queue); 218 if (dev->cn_queue) {
163 destroy_workqueue(dev->cn_queue); 219 flush_workqueue(dev->cn_queue);
220 destroy_workqueue(dev->cn_queue);
221 }
164 222
165 spin_lock_bh(&dev->queue_lock); 223 spin_lock_bh(&dev->queue_lock);
166 list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) 224 list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index bf4830082a13..fd336c5a9057 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * connector.c 2 * connector.c
3 * 3 *
4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 9 * the Free Software Foundation; either version 2 of the License, or
@@ -145,14 +145,13 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
145 __cbq->data.ddata = data; 145 __cbq->data.ddata = data;
146 __cbq->data.destruct_data = destruct_data; 146 __cbq->data.destruct_data = destruct_data;
147 147
148 if (queue_work(dev->cbdev->cn_queue, 148 if (queue_cn_work(__cbq, &__cbq->work))
149 &__cbq->work))
150 err = 0; 149 err = 0;
151 else 150 else
152 err = -EINVAL; 151 err = -EINVAL;
153 } else { 152 } else {
154 struct cn_callback_data *d; 153 struct cn_callback_data *d;
155 154
156 err = -ENOMEM; 155 err = -ENOMEM;
157 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC); 156 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
158 if (__new_cbq) { 157 if (__new_cbq) {
@@ -163,10 +162,12 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
163 d->destruct_data = destruct_data; 162 d->destruct_data = destruct_data;
164 d->free = __new_cbq; 163 d->free = __new_cbq;
165 164
165 __new_cbq->pdev = __cbq->pdev;
166
166 INIT_WORK(&__new_cbq->work, 167 INIT_WORK(&__new_cbq->work,
167 &cn_queue_wrapper); 168 &cn_queue_wrapper);
168 169
169 if (queue_work(dev->cbdev->cn_queue, 170 if (queue_cn_work(__new_cbq,
170 &__new_cbq->work)) 171 &__new_cbq->work))
171 err = 0; 172 err = 0;
172 else { 173 else {
@@ -237,7 +238,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
237 238
238 req = (struct cn_notify_req *)ctl->data; 239 req = (struct cn_notify_req *)ctl->data;
239 for (i = 0; i < ctl->idx_notify_num; ++i, ++req) { 240 for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
240 if (id->idx >= req->first && 241 if (id->idx >= req->first &&
241 id->idx < req->first + req->range) { 242 id->idx < req->first + req->range) {
242 idx_found = 1; 243 idx_found = 1;
243 break; 244 break;
@@ -245,7 +246,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
245 } 246 }
246 247
247 for (i = 0; i < ctl->val_notify_num; ++i, ++req) { 248 for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
248 if (id->val >= req->first && 249 if (id->val >= req->first &&
249 id->val < req->first + req->range) { 250 id->val < req->first + req->range) {
250 val_found = 1; 251 val_found = 1;
251 break; 252 break;
@@ -459,7 +460,7 @@ static int __devinit cn_init(void)
459 netlink_kernel_release(dev->nls); 460 netlink_kernel_release(dev->nls);
460 return -EINVAL; 461 return -EINVAL;
461 } 462 }
462 463
463 cn_already_initialized = 1; 464 cn_already_initialized = 1;
464 465
465 err = cn_add_callback(&dev->id, "connector", &cn_callback); 466 err = cn_add_callback(&dev->id, "connector", &cn_callback);