author     Jens Axboe <jens.axboe@oracle.com>      2009-11-19 13:10:39 -0500
committer  David Howells <dhowells@redhat.com>     2009-11-19 13:10:39 -0500
commit     4d8bb2cbccf6dccaada509aafeb01c6205c9d8c4
tree       116689de1f46840915e7f6f196ad334a990a2d5b
parent     3d7a641e544e428191667e8b1f83f96fa46dbd65
SLOW_WORK: Make slow_work_ops ->get_ref/->put_ref optional
Make the slow-work facility's ability to take references on a work item
optional, as not every user requires it.

Even the facility's own internal work items only stub out ->get_ref()/->put_ref(),
so those stubs can be removed as well.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: David Howells <dhowells@redhat.com>
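To illustrate what the change permits, here is a minimal, hypothetical sketch of a slow-work user that supplies only ->execute(). The struct my_object embedding and all my_* names are invented for illustration and are not part of this patch; the object must be kept alive by some other means, since no reference counting is hooked in.

#include <linux/module.h>
#include <linux/slow-work.h>

struct my_object {
	struct slow_work work;	/* hypothetical object embedding a work item */
	/* ... */
};

static void my_object_execute(struct slow_work *work)
{
	struct my_object *obj = container_of(work, struct my_object, work);

	pr_info("slow work running for %p\n", obj);	/* placeholder for the real processing */
}

static const struct slow_work_ops my_object_slow_work_ops = {
	.owner		= THIS_MODULE,
	.execute	= my_object_execute,
	/* .get_ref and .put_ref deliberately left unset */
};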
-rw-r--r--  Documentation/slow-work.txt |  2
-rw-r--r--  kernel/slow-work.c          | 36
2 files changed, 17 insertions, 21 deletions
diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt
index f12fda31dcdc..c655c517fc68 100644
--- a/Documentation/slow-work.txt
+++ b/Documentation/slow-work.txt
@@ -125,7 +125,7 @@ ITEM OPERATIONS
 ===============
 
 Each work item requires a table of operations of type struct slow_work_ops.
-All members are required:
+Only ->execute() is required, getting and putting of a reference are optional.
 
 (*) Get a reference on an item:
 
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index dd08f376e406..fccf421eb5c1 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -145,6 +145,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
 static int slow_work_user_count;
 static DEFINE_MUTEX(slow_work_user_lock);
 
+static inline int slow_work_get_ref(struct slow_work *work)
+{
+	if (work->ops->get_ref)
+		return work->ops->get_ref(work);
+
+	return 0;
+}
+
+static inline void slow_work_put_ref(struct slow_work *work)
+{
+	if (work->ops->put_ref)
+		work->ops->put_ref(work);
+}
+
 /*
  * Calculate the maximum number of active threads in the pool that are
  * permitted to process very slow work items.
@@ -248,7 +262,7 @@ static bool slow_work_execute(int id)
 	}
 
 	/* sort out the race between module unloading and put_ref() */
-	work->ops->put_ref(work);
+	slow_work_put_ref(work);
 
 #ifdef CONFIG_MODULES
 	module = slow_work_thread_processing[id];
@@ -309,7 +323,6 @@ int slow_work_enqueue(struct slow_work *work)
 	BUG_ON(slow_work_user_count <= 0);
 	BUG_ON(!work);
 	BUG_ON(!work->ops);
-	BUG_ON(!work->ops->get_ref);
 
 	/* when honouring an enqueue request, we only promise that we will run
 	 * the work function in the future; we do not promise to run it once
@@ -339,7 +352,7 @@ int slow_work_enqueue(struct slow_work *work)
 	if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
 		set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
 	} else {
-		if (work->ops->get_ref(work) < 0)
+		if (slow_work_get_ref(work) < 0)
 			goto cant_get_ref;
 		if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
 			list_add_tail(&work->link, &vslow_work_queue);
@@ -480,21 +493,6 @@ static void slow_work_cull_timeout(unsigned long data)
 }
 
 /*
- * Get a reference on slow work thread starter
- */
-static int slow_work_new_thread_get_ref(struct slow_work *work)
-{
-	return 0;
-}
-
-/*
- * Drop a reference on slow work thread starter
- */
-static void slow_work_new_thread_put_ref(struct slow_work *work)
-{
-}
-
-/*
  * Start a new slow work thread
  */
 static void slow_work_new_thread_execute(struct slow_work *work)
@@ -529,8 +527,6 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 
 static const struct slow_work_ops slow_work_new_thread_ops = {
 	.owner = THIS_MODULE,
-	.get_ref = slow_work_new_thread_get_ref,
-	.put_ref = slow_work_new_thread_put_ref,
 	.execute = slow_work_new_thread_execute,
 };
 
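For completeness, a caller-side sketch under the same hypothetical my_object names as above, assuming the module has already called slow_work_register_user(THIS_MODULE): because slow_work_enqueue() now goes through the new slow_work_get_ref() wrapper, an absent ->get_ref() simply behaves as a successful reference grab.

static int my_object_queue(struct my_object *obj)
{
	/* bind the item to its ops table once, at object set-up time */
	slow_work_init(&obj->work, &my_object_slow_work_ops);

	/* with no ->get_ref(), the reference step cannot fail here */
	return slow_work_enqueue(&obj->work);
}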