author     Jens Axboe <jens.axboe@oracle.com>      2009-11-19 13:10:47 -0500
committer  David Howells <dhowells@redhat.com>     2009-11-19 13:10:47 -0500
commit     6b8268b17a1ffc942bc72d7d00274e433d6b6719 (patch)
tree       bd293facd4b805fc05588fcaf024e964a0bb1cca /kernel
parent     0160950297c08f8233c89b9f9e7dd59cfb080809 (diff)
SLOW_WORK: Add delayed_slow_work support
This adds support for starting slow work with a delay, similar to the
functionality we have for workqueues.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: David Howells <dhowells@redhat.com>
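For illustration, a caller would use the new interface roughly as follows.
This is a hedged sketch, not part of the patch: the my_* names are
hypothetical, and delayed_slow_work_init() is assumed to come from the
include/linux/slow-work.h half of the patch, which falls outside this
kernel/-only diffstat.

	static int my_get_ref(struct slow_work *work) { return 0; }
	static void my_put_ref(struct slow_work *work) { }

	static void my_execute(struct slow_work *work)
	{
		/* runs in a slow-work thread once the delay has expired */
	}

	static const struct slow_work_ops my_ops = {
		.get_ref = my_get_ref,
		.put_ref = my_put_ref,
		.execute = my_execute,
	};

	static struct delayed_slow_work my_dwork;

	/* initialise once, then queue with a delay given in jiffies */
	delayed_slow_work_init(&my_dwork, &my_ops);
	ret = delayed_slow_work_enqueue(&my_dwork, 5 * HZ);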
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/slow-work.c	129
1 file changed, 127 insertions(+), 2 deletions(-)
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 671cc434532a..f67e1daae93d 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -406,11 +406,40 @@ void slow_work_cancel(struct slow_work *work)
 	bool wait = true, put = false;
 
 	set_bit(SLOW_WORK_CANCELLING, &work->flags);
+	smp_mb();
+
+	/* if the work item is a delayed work item with an active timer, we
+	 * need to wait for the timer to finish _before_ getting the spinlock,
+	 * lest we deadlock against the timer routine
+	 *
+	 * the timer routine will leave DELAYED set if it notices the
+	 * CANCELLING flag in time
+	 */
+	if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
+		struct delayed_slow_work *dwork =
+			container_of(work, struct delayed_slow_work, work);
+		del_timer_sync(&dwork->timer);
+	}
 
 	spin_lock_irq(&slow_work_queue_lock);
 
-	if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
-	    !list_empty(&work->link)) {
+	if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
+		/* the timer routine aborted or never happened, so we are left
+		 * holding the timer's reference on the item and should just
+		 * drop the pending flag and wait for any ongoing execution to
+		 * finish */
+		struct delayed_slow_work *dwork =
+			container_of(work, struct delayed_slow_work, work);
+
+		BUG_ON(timer_pending(&dwork->timer));
+		BUG_ON(!list_empty(&work->link));
+
+		clear_bit(SLOW_WORK_DELAYED, &work->flags);
+		put = true;
+		clear_bit(SLOW_WORK_PENDING, &work->flags);
+
+	} else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
+		   !list_empty(&work->link)) {
 		/* the link in the pending queue holds a reference on the item
 		 * that we will need to release */
 		list_del_init(&work->link);
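A note on ordering in the hunk above: del_timer_sync() waits for a
concurrently running timer handler to finish, and delayed_slow_work_timer()
(added in the next hunk) takes slow_work_queue_lock itself. Calling
del_timer_sync() with that lock already held could therefore deadlock, which
is why the cancel path kills the timer first and only then takes the lock.
A minimal sketch of the pattern being avoided (hypothetical, for
illustration only):

	/* WRONG: if the timer handler is already running, it spins on
	 * slow_work_queue_lock while we spin waiting for it to finish */
	spin_lock_irq(&slow_work_queue_lock);
	del_timer_sync(&dwork->timer);
	spin_unlock_irq(&slow_work_queue_lock);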
@@ -441,6 +470,102 @@ void slow_work_cancel(struct slow_work *work)
 EXPORT_SYMBOL(slow_work_cancel);
 
 /*
+ * Handle expiry of the delay timer, indicating that a delayed slow work item
+ * should now be queued if not cancelled
+ */
+static void delayed_slow_work_timer(unsigned long data)
+{
+	struct slow_work *work = (struct slow_work *) data;
+	unsigned long flags;
+	bool queued = false, put = false;
+
+	spin_lock_irqsave(&slow_work_queue_lock, flags);
+	if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
+		clear_bit(SLOW_WORK_DELAYED, &work->flags);
+
+		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
+			/* we discard the reference the timer was holding in
+			 * favour of the one the executor holds */
+			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
+			put = true;
+		} else {
+			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
+				list_add_tail(&work->link, &vslow_work_queue);
+			else
+				list_add_tail(&work->link, &slow_work_queue);
+			queued = true;
+		}
+	}
+
+	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+	if (put)
+		slow_work_put_ref(work);
+	if (queued)
+		wake_up(&slow_work_thread_wq);
+}
+
+/**
+ * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
+ * @dwork: The delayed work item to queue
+ * @delay: When to start executing the work, in jiffies from now
+ *
+ * This is similar to slow_work_enqueue(), but it adds a delay before the work
+ * is actually queued for processing.
+ *
+ * The item can have delayed processing requested on it whilst it is being
+ * executed.  The delay will begin immediately, and if it expires before the
+ * item finishes executing, the item will be placed back on the queue when it
+ * has done executing.
+ */
+int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
+			      unsigned long delay)
+{
+	struct slow_work *work = &dwork->work;
+	unsigned long flags;
+	int ret;
+
+	if (delay == 0)
+		return slow_work_enqueue(&dwork->work);
+
+	BUG_ON(slow_work_user_count <= 0);
+	BUG_ON(!work);
+	BUG_ON(!work->ops);
+
+	if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+		return -ECANCELED;
+
+	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
+		spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+		if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+			goto cancelled;
+
+		/* the timer holds a reference whilst it is pending */
+		ret = work->ops->get_ref(work);
+		if (ret < 0)
+			goto cant_get_ref;
+
+		if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
+			BUG();
+		dwork->timer.expires = jiffies + delay;
+		dwork->timer.data = (unsigned long) work;
+		dwork->timer.function = delayed_slow_work_timer;
+		add_timer(&dwork->timer);
+
+		spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+	}
+
+	return 0;
+
+cancelled:
+	ret = -ECANCELED;
+cant_get_ref:
+	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(delayed_slow_work_enqueue);
+
+/*
  * Schedule a cull of the thread pool at some time in the near future
  */
 static void slow_work_schedule_cull(void)
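Tying the two halves together, the enqueue/cancel lifecycle looks roughly
like this. A hedged sketch reusing the hypothetical my_dwork from the first
example; delayed_slow_work_cancel() is assumed to be provided by the
slow-work.h half of the patch, which is not shown in this diffstat:

	/* a delay of 0 falls straight through to slow_work_enqueue() */
	delayed_slow_work_enqueue(&my_dwork, 0);

	/* a non-zero delay arms dwork->timer; on expiry,
	 * delayed_slow_work_timer() moves the item onto the run queue,
	 * or defers the enqueue if the item is still executing */
	delayed_slow_work_enqueue(&my_dwork, HZ);

	/* cancellation stops a pending timer and/or dequeues the item,
	 * following the slow_work_cancel() path modified above */
	delayed_slow_work_cancel(&my_dwork);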