author     Tejun Heo <tj@kernel.org>              2010-10-14 19:55:22 -0400
committer  David S. Miller <davem@davemloft.net>  2010-10-24 17:20:01 -0400
commit     6cebb17beece746de86793cd549e84740896cf4a
tree       6f5a3ee1ab6ee0147688f7e2343055645f7a64e7
parent     229aebb873e29726b91e076161649cf45154b0bf
connector: remove lazy workqueue creation
Commit 1a5645bc ("connector: create connector workqueue only while
needed once") implemented lazy creation of the connector workqueue.
With cmwq now in place, lazy workqueue creation no longer makes much
sense and only adds complexity. Remove it and allocate an ordered
workqueue during initialization instead.

This also removes a call to flush_scheduled_work(), which is
deprecated and scheduled for removal.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/connector/cn_queue.c   | 75
-rw-r--r--  drivers/connector/connector.c  |  9
-rw-r--r--  include/linux/connector.h      |  8
3 files changed, 12 insertions(+), 80 deletions(-)
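The core of the change is visible in cn_queue_alloc_dev() in the diff
below: instead of deferring workqueue creation to a kevent-scheduled
helper, the queue is allocated up front and allocation failure is
reported immediately. As a minimal sketch of this pattern (the
example_* names are hypothetical, not part of this patch):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_dev {
	struct workqueue_struct *wq;
};

/* Allocate the ordered workqueue once, at initialization. */
static struct example_dev *example_alloc(const char *name)
{
	struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	/* Ordered: work items execute one at a time, in queueing order. */
	dev->wq = alloc_ordered_workqueue(name, 0);
	if (!dev->wq) {
		kfree(dev);
		return NULL;
	}

	return dev;
}

/* Teardown: wait for pending work, then destroy the queue. */
static void example_free(struct example_dev *dev)
{
	flush_workqueue(dev->wq);
	destroy_workqueue(dev->wq);
	kfree(dev);
}

Because the workqueue is guaranteed to exist after a successful
allocation, every caller can use plain queue_work() with no fallback
path.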
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 210338ea222f..81270d221e5a 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,48 +31,6 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-
-/*
- * This job is sent to the kevent workqueue.
- * While no event is once sent to any callback, the connector workqueue
- * is not created to avoid a useless waiting kernel task.
- * Once the first event is received, we create this dedicated workqueue which
- * is necessary because the flow of data can be high and we don't want
- * to encumber keventd with that.
- */
-static void cn_queue_create(struct work_struct *work)
-{
-	struct cn_queue_dev *dev;
-
-	dev = container_of(work, struct cn_queue_dev, wq_creation);
-
-	dev->cn_queue = create_singlethread_workqueue(dev->name);
-	/* If we fail, we will use keventd for all following connector jobs */
-	WARN_ON(!dev->cn_queue);
-}
-
-/*
- * Queue a data sent to a callback.
- * If the connector workqueue is already created, we queue the job on it.
- * Otherwise, we queue the job to kevent and queue the connector workqueue
- * creation too.
- */
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
-{
-	struct cn_queue_dev *pdev = cbq->pdev;
-
-	if (likely(pdev->cn_queue))
-		return queue_work(pdev->cn_queue, work);
-
-	/* Don't create the connector workqueue twice */
-	if (atomic_inc_return(&pdev->wq_requested) == 1)
-		schedule_work(&pdev->wq_creation);
-	else
-		atomic_dec(&pdev->wq_requested);
-
-	return schedule_work(work);
-}
-
 void cn_queue_wrapper(struct work_struct *work)
 {
 	struct cn_callback_entry *cbq =
@@ -111,11 +69,7 @@ cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
 
 static void cn_queue_free_callback(struct cn_callback_entry *cbq)
 {
-	/* The first jobs have been sent to kevent, flush them too */
-	flush_scheduled_work();
-	if (cbq->pdev->cn_queue)
-		flush_workqueue(cbq->pdev->cn_queue);
-
+	flush_workqueue(cbq->pdev->cn_queue);
 	kfree(cbq);
 }
 
@@ -193,11 +147,14 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
 	atomic_set(&dev->refcnt, 0);
 	INIT_LIST_HEAD(&dev->queue_list);
 	spin_lock_init(&dev->queue_lock);
-	init_waitqueue_head(&dev->wq_created);
 
 	dev->nls = nls;
 
-	INIT_WORK(&dev->wq_creation, cn_queue_create);
+	dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
+	if (!dev->cn_queue) {
+		kfree(dev);
+		return NULL;
+	}
 
 	return dev;
 }
@@ -205,25 +162,9 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
 void cn_queue_free_dev(struct cn_queue_dev *dev)
 {
 	struct cn_callback_entry *cbq, *n;
-	long timeout;
-	DEFINE_WAIT(wait);
-
-	/* Flush the first pending jobs queued on kevent */
-	flush_scheduled_work();
-
-	/* If the connector workqueue creation is still pending, wait for it */
-	prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
-	if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
-		timeout = schedule_timeout(HZ * 2);
-		if (!timeout && !dev->cn_queue)
-			WARN_ON(1);
-	}
-	finish_wait(&dev->wq_created, &wait);
 
-	if (dev->cn_queue) {
-		flush_workqueue(dev->cn_queue);
-		destroy_workqueue(dev->cn_queue);
-	}
+	flush_workqueue(dev->cn_queue);
+	destroy_workqueue(dev->cn_queue);
 
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
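For contrast with the removed queue_cn_work() fallback logic above,
here is a hedged sketch of the caller side after this change
(example_work_fn and example_submit are illustrative only, not from
the patch): work is queued directly, and the ordered workqueue
preserves the serialization that create_singlethread_workqueue() used
to provide.

#include <linux/connector.h>
#include <linux/workqueue.h>

static void example_work_fn(struct work_struct *work)
{
	/* process one queued connector job */
}

static DECLARE_WORK(example_work, example_work_fn);

static int example_submit(struct cn_queue_dev *dev)
{
	/*
	 * queue_work() returns false only if the item is already
	 * pending; there is no longer a "queue to keventd while the
	 * dedicated workqueue is being created" fallback.
	 */
	return queue_work(dev->cn_queue, &example_work) ? 0 : -EINVAL;
}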
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 1d48f40342cb..e16c3fa8d2e3 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -133,7 +133,8 @@ static int cn_call_callback(struct sk_buff *skb)
 					__cbq->data.skb == NULL)) {
 				__cbq->data.skb = skb;
 
-				if (queue_cn_work(__cbq, &__cbq->work))
+				if (queue_work(dev->cbdev->cn_queue,
+					       &__cbq->work))
 					err = 0;
 				else
 					err = -EINVAL;
@@ -148,13 +149,11 @@ static int cn_call_callback(struct sk_buff *skb)
 			d->callback = __cbq->data.callback;
 			d->free = __new_cbq;
 
-			__new_cbq->pdev = __cbq->pdev;
-
 			INIT_WORK(&__new_cbq->work,
 				  &cn_queue_wrapper);
 
-			if (queue_cn_work(__new_cbq,
-					  &__new_cbq->work))
+			if (queue_work(dev->cbdev->cn_queue,
+				       &__new_cbq->work))
 				err = 0;
 			else {
 				kfree(__new_cbq);
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 3a779ffba60b..7e8ca75d2dad 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -88,12 +88,6 @@ struct cn_queue_dev {
 	unsigned char name[CN_CBQ_NAMELEN];
 
 	struct workqueue_struct *cn_queue;
-	/* Sent to kevent to create cn_queue only when needed */
-	struct work_struct wq_creation;
-	/* Tell if the wq_creation job is pending/completed */
-	atomic_t wq_requested;
-	/* Wait for cn_queue to be created */
-	wait_queue_head_t wq_created;
 
 	struct list_head queue_list;
 	spinlock_t queue_lock;
@@ -141,8 +135,6 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t);
 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
 
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
-
 struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
 void cn_queue_free_dev(struct cn_queue_dev *dev);
 