author    Jens Axboe <jens.axboe@oracle.com>    2009-11-19 13:10:43 -0500
committer David Howells <dhowells@redhat.com>   2009-11-19 13:10:43 -0500
commit    0160950297c08f8233c89b9f9e7dd59cfb080809
tree      4910bfb7ab5b4000a1679fd165d217ff48226268
parent    4d8bb2cbccf6dccaada509aafeb01c6205c9d8c4
SLOW_WORK: Add support for cancellation of slow work

Add support for cancellation of queued slow work and delayed slow work items.
The cancellation functions will wait for items that are pending or undergoing
execution to be discarded by the slow work facility.

Attempting to enqueue work that is in the process of being cancelled will
result in ECANCELED.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: David Howells <dhowells@redhat.com>

 Documentation/slow-work.txt | 12 +++++++++++-
 include/linux/slow-work.h   |  2 ++
 kernel/slow-work.c          | 81 ++++++++++++++++++++++++++++++++++++++++----
 3 files changed, 88 insertions(+), 7 deletions(-)
diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt
index c655c517fc68..2e384bd4dead 100644
--- a/Documentation/slow-work.txt
+++ b/Documentation/slow-work.txt
@@ -108,7 +108,17 @@ on the item, 0 otherwise.
 
 
 The items are reference counted, so there ought to be no need for a flush
-operation.  When all a module's slow work items have been processed, and the
+operation.  But as the reference counting is optional, a means to cancel
+existing work items is also provided:
+
+	slow_work_cancel(&myitem);
+
+can be used to cancel pending work.  The above cancel function waits for
+existing work to have been executed (or prevents execution of it, depending
+on timing).
+
+
+When all a module's slow work items have been processed, and the
 module has no further interest in the facility, it should unregister its
 interest:
 
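
To make the documented flow concrete, here is a minimal usage sketch. This is
hypothetical caller code: my_work, my_execute, my_start and my_stop are
illustrative names, not part of this patch, and it assumes the ops layout at
this point in the series, where get_ref/put_ref are optional.

	#include <linux/slow-work.h>

	/* hypothetical work function; runs in one of the slow-work threads */
	static void my_execute(struct slow_work *work)
	{
		/* long-running operation goes here */
	}

	static const struct slow_work_ops my_ops = {
		.owner	 = THIS_MODULE,	/* assumes the module-owner field */
		.execute = my_execute,	/* get_ref/put_ref omitted: optional */
	};

	static struct slow_work my_work;

	static int my_start(void)
	{
		slow_work_init(&my_work, &my_ops);
		return slow_work_enqueue(&my_work);
	}

	static void my_stop(void)
	{
		/* dequeues the item if still pending, or waits for it to
		 * finish executing */
		slow_work_cancel(&my_work);
	}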
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
index 9adb2b30754f..eef20182d5b4 100644
--- a/include/linux/slow-work.h
+++ b/include/linux/slow-work.h
@@ -51,6 +51,7 @@ struct slow_work {
 #define SLOW_WORK_EXECUTING	1	/* item currently executing */
 #define SLOW_WORK_ENQ_DEFERRED	2	/* item enqueue deferred */
 #define SLOW_WORK_VERY_SLOW	3	/* item is very slow */
+#define SLOW_WORK_CANCELLING	4	/* item is being cancelled, don't enqueue */
 	const struct slow_work_ops *ops; /* operations table for this item */
 	struct list_head	link;	/* link in queue */
 };
@@ -88,6 +89,7 @@ static inline void vslow_work_init(struct slow_work *work,
 }
 
 extern int slow_work_enqueue(struct slow_work *work);
+extern void slow_work_cancel(struct slow_work *work);
 extern int slow_work_register_user(struct module *owner);
 extern void slow_work_unregister_user(struct module *owner);
 
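
With the prototype in place, the caller-visible contract is: once
slow_work_cancel() has marked an item SLOW_WORK_CANCELLING,
slow_work_enqueue() refuses it with -ECANCELED rather than silently
requeueing it. A sketch of a requeue helper honouring that contract follows
(my_requeue is a hypothetical name, not part of the patch):

	/* hypothetical helper: requeue unless a cancellation is in flight */
	static int my_requeue(struct slow_work *work)
	{
		int ret = slow_work_enqueue(work);

		switch (ret) {
		case -ECANCELED:
			/* slow_work_cancel() is in progress: drop the
			 * request and let the canceller win */
			break;
		case -EAGAIN:
			/* the item's get_ref operation failed: the caller
			 * may retry later */
			break;
		default:
			break;	/* 0: queued, or execution deferred */
		}
		return ret;
	}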
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index fccf421eb5c1..671cc434532a 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -236,12 +236,17 @@ static bool slow_work_execute(int id)
 	if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
 		BUG();
 
-	work->ops->execute(work);
+	/* don't execute if the work is in the process of being cancelled */
+	if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
+		work->ops->execute(work);
 
 	if (very_slow)
 		atomic_dec(&vslow_work_executing_count);
 	clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
 
+	/* wake up anyone waiting for this work to be complete */
+	wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
+
 	/* if someone tried to enqueue the item whilst we were executing it,
 	 * then it'll be left unenqueued to avoid multiple threads trying to
 	 * execute it simultaneously
@@ -314,11 +319,16 @@ auto_requeue:
  * allowed to pick items to execute.  This ensures that very slow items won't
  * overly block ones that are just ordinarily slow.
  *
- * Returns 0 if successful, -EAGAIN if not.
+ * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if queueing of
+ * cancelled work is attempted)
  */
 int slow_work_enqueue(struct slow_work *work)
 {
 	unsigned long flags;
+	int ret;
+
+	if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+		return -ECANCELED;
 
 	BUG_ON(slow_work_user_count <= 0);
 	BUG_ON(!work);
@@ -335,6 +345,9 @@ int slow_work_enqueue(struct slow_work *work)
 	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
 		spin_lock_irqsave(&slow_work_queue_lock, flags);
 
+		if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
+			goto cancelled;
+
 		/* we promise that we will not attempt to execute the work
 		 * function in more than one thread simultaneously
 		 *
@@ -352,8 +365,9 @@ int slow_work_enqueue(struct slow_work *work)
 		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
 			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
 		} else {
-			if (slow_work_get_ref(work) < 0)
-				goto cant_get_ref;
+			ret = slow_work_get_ref(work);
+			if (ret < 0)
+				goto failed;
 			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
 				list_add_tail(&work->link, &vslow_work_queue);
 			else
@@ -365,12 +379,67 @@ int slow_work_enqueue(struct slow_work *work)
 	}
 	return 0;
 
-cant_get_ref:
+cancelled:
+	ret = -ECANCELED;
+failed:
 	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
-	return -EAGAIN;
+	return ret;
 }
 EXPORT_SYMBOL(slow_work_enqueue);
 
+static int slow_work_wait(void *word)
+{
+	schedule();
+	return 0;
+}
+
+/**
+ * slow_work_cancel - Cancel a slow work item
+ * @work: The work item to cancel
+ *
+ * This function will cancel a previously enqueued work item. If we cannot
+ * cancel the work item, it is guaranteed to have run when this function
+ * returns.
+ */
+void slow_work_cancel(struct slow_work *work)
+{
+	bool wait = true, put = false;
+
+	set_bit(SLOW_WORK_CANCELLING, &work->flags);
+
+	spin_lock_irq(&slow_work_queue_lock);
+
+	if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
+	    !list_empty(&work->link)) {
+		/* the link in the pending queue holds a reference on the item
+		 * that we will need to release */
+		list_del_init(&work->link);
+		wait = false;
+		put = true;
+		clear_bit(SLOW_WORK_PENDING, &work->flags);
+
+	} else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
+		/* the executor is holding our only reference on the item, so
+		 * we merely need to wait for it to finish executing */
+		clear_bit(SLOW_WORK_PENDING, &work->flags);
+	}
+
+	spin_unlock_irq(&slow_work_queue_lock);
+
+	/* the EXECUTING flag is set by the executor whilst the spinlock is
+	 * held and before the item is dequeued - so assuming the above
+	 * doesn't actually dequeue it, simply waiting for the EXECUTING flag
+	 * to be released here should be sufficient */
+	if (wait)
+		wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
+			    TASK_UNINTERRUPTIBLE);
+
+	clear_bit(SLOW_WORK_CANCELLING, &work->flags);
+	if (put)
+		slow_work_put_ref(work);
+}
+EXPORT_SYMBOL(slow_work_cancel);
+
 /*
  * Schedule a cull of the thread pool at some time in the near future
  */
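
The core of the patch is the handshake between slow_work_execute() and
slow_work_cancel(): the canceller sets CANCELLING, dequeues a still-pending
item or else sleeps until the EXECUTING bit clears, and the executor both
skips cancelled work and wakes waiters once EXECUTING drops. Below is a
rough, userspace-only analogue of that handshake, for illustration: C11
atomics plus a condition variable stand in for the kernel's bitops and
wait_on_bit()/wake_up_bit(), and the queue, reference counting and
deferred-enqueue paths are omitted.

	/* build: cc -std=c11 -pthread slow_cancel_demo.c -o slow_cancel_demo */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	#define WF_PENDING	(1u << 0)  /* item queued, not yet claimed */
	#define WF_EXECUTING	(1u << 1)  /* an executor owns the item */
	#define WF_CANCELLING	(1u << 2)  /* cancel in flight, don't enqueue */

	static atomic_uint flags;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  idle = PTHREAD_COND_INITIALIZER;

	/* analogue of slow_work_execute(): claim, maybe run, release, wake */
	static void *executor(void *arg)
	{
		(void)arg;

		pthread_mutex_lock(&lock);
		/* EXECUTING is set before PENDING is cleared, under the lock;
		 * the cancel path relies on this ordering */
		atomic_fetch_or(&flags, WF_EXECUTING);
		atomic_fetch_and(&flags, ~WF_PENDING);
		pthread_mutex_unlock(&lock);

		/* skip the work body if a cancel raced in */
		if (!(atomic_load(&flags) & WF_CANCELLING)) {
			puts("executing the work item");
			sleep(1);
		} else {
			puts("work item discarded: cancel won the race");
		}

		pthread_mutex_lock(&lock);
		atomic_fetch_and(&flags, ~WF_EXECUTING);
		pthread_cond_broadcast(&idle);	/* wake_up_bit() analogue */
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	/* analogue of slow_work_cancel(): mark, then wait until idle */
	static void cancel(void)
	{
		atomic_fetch_or(&flags, WF_CANCELLING);

		pthread_mutex_lock(&lock);
		while (atomic_load(&flags) & (WF_PENDING | WF_EXECUTING))
			pthread_cond_wait(&idle, &lock); /* wait_on_bit() analogue */
		pthread_mutex_unlock(&lock);

		atomic_fetch_and(&flags, ~WF_CANCELLING);
		puts("cancel complete: item neither queued nor executing");
	}

	int main(void)
	{
		pthread_t t;

		atomic_fetch_or(&flags, WF_PENDING);	/* "enqueue" the item */
		pthread_create(&t, NULL, executor, NULL);
		cancel();
		pthread_join(t, NULL);
		return 0;
	}

As in the kernel version, the wait condition is re-checked under the lock
after every wakeup, so a cancel that arrives before, during, or after
execution always returns only once the item is idle.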