author		Jens Axboe <jens.axboe@oracle.com>	2009-11-19 13:10:47 -0500
committer	David Howells <dhowells@redhat.com>	2009-11-19 13:10:47 -0500
commit		6b8268b17a1ffc942bc72d7d00274e433d6b6719
tree		bd293facd4b805fc05588fcaf024e964a0bb1cca
parent		0160950297c08f8233c89b9f9e7dd59cfb080809
SLOW_WORK: Add delayed_slow_work support
This adds support for starting slow work with a delay, similar
to the functionality we have for workqueues.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: David Howells <dhowells@redhat.com>
-rw-r--r--	Documentation/slow-work.txt	 16
-rw-r--r--	include/linux/slow-work.h	 29
-rw-r--r--	kernel/slow-work.c		129
3 files changed, 171 insertions, 3 deletions
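
For orientation before the diffs, here is a minimal caller-side sketch of the
API this patch adds. Every my_* name is invented for illustration, the ops
table assumes the slow_work_ops layout at this point in the series (owner,
get_ref, put_ref, execute), and slow_work_register_user() is assumed to have
been called already:

	#include <linux/jiffies.h>
	#include <linux/module.h>
	#include <linux/slow-work.h>

	/* the item below is static, so the reference hooks can be no-ops */
	static int my_get_ref(struct slow_work *work)
	{
		return 0;
	}

	static void my_put_ref(struct slow_work *work)
	{
	}

	static void my_execute(struct slow_work *work)
	{
		/* runs in a slow-work pool thread once the delay expires;
		 * may sleep */
	}

	static const struct slow_work_ops my_item_ops = {
		.owner	 = THIS_MODULE,
		.get_ref = my_get_ref,
		.put_ref = my_put_ref,
		.execute = my_execute,
	};

	static struct delayed_slow_work my_item;

	static int my_start(void)
	{
		delayed_slow_work_init(&my_item, &my_item_ops);

		/* execute roughly one second from now; a zero delay behaves
		 * exactly like slow_work_enqueue() */
		return delayed_slow_work_enqueue(&my_item, HZ);
	}

	static void my_stop(void)
	{
		/* deletes any pending timer, then prevents or waits out
		 * execution */
		delayed_slow_work_cancel(&my_item);
	}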
diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt
index 2e384bd4dead..a9d1b0ffdded 100644
--- a/Documentation/slow-work.txt
+++ b/Documentation/slow-work.txt
@@ -41,6 +41,13 @@ expand files, provided the time taken to do so isn't too long.
 Operations of both types may sleep during execution, thus tying up the thread
 loaned to it.
 
+A further class of work item is available, based on the slow work item class:
+
+ (*) Delayed slow work items.
+
+     These are slow work items that have a timer to defer queueing of the item
+     for a while.
+
 
 THREAD-TO-CLASS ALLOCATION
 --------------------------
@@ -95,6 +102,10 @@ Slow work items may then be set up by:
 
 or:
 
+	delayed_slow_work_init(&myitem, &myitem_ops);
+
+or:
+
 	vslow_work_init(&myitem, &myitem_ops);
 
 depending on its class.
@@ -104,7 +115,9 @@ A suitably set up work item can then be enqueued for processing:
 	int ret = slow_work_enqueue(&myitem);
 
 This will return a -ve error if the thread pool is unable to gain a reference
-on the item, 0 otherwise.
+on the item, 0 otherwise, or (for delayed work):
+
+	int ret = delayed_slow_work_enqueue(&myitem, my_jiffy_delay);
 
 
 The items are reference counted, so there ought to be no need for a flush
@@ -112,6 +125,7 @@ operation. But as the reference counting is optional, means to cancel
 existing work items are also included:
 
 	slow_work_cancel(&myitem);
+	delayed_slow_work_cancel(&myitem);
 
 can be used to cancel pending work. The above cancel functions wait for
 existing work to have been executed (or prevent execution of them, depending
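
The return convention documented above matches the implementation added to
kernel/slow-work.c below: -ECANCELED if the enqueue races with cancellation,
any other negative value propagated from ops->get_ref(), and 0 on success
(including the case where the item was already pending, in which case the
existing delay is left untouched). A hypothetical call site, where
requeue_later() is invented for illustration:

	static void kick_item(struct delayed_slow_work *dwork)
	{
		int ret;

		ret = delayed_slow_work_enqueue(dwork, msecs_to_jiffies(250));
		if (ret == -ECANCELED)
			return;			/* a concurrent cancel won */
		if (ret < 0)
			requeue_later(dwork);	/* ops->get_ref() failed */
	}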
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
index eef20182d5b4..b245b9a9cc0b 100644
--- a/include/linux/slow-work.h
+++ b/include/linux/slow-work.h
@@ -17,6 +17,7 @@
 #ifdef CONFIG_SLOW_WORK
 
 #include <linux/sysctl.h>
+#include <linux/timer.h>
 
 struct slow_work;
 
@@ -52,10 +53,16 @@ struct slow_work {
 #define SLOW_WORK_ENQ_DEFERRED	2	/* item enqueue deferred */
 #define SLOW_WORK_VERY_SLOW	3	/* item is very slow */
 #define SLOW_WORK_CANCELLING	4	/* item is being cancelled, don't enqueue */
+#define SLOW_WORK_DELAYED	5	/* item is struct delayed_slow_work with active timer */
 	const struct slow_work_ops *ops; /* operations table for this item */
 	struct list_head	link;	/* link in queue */
 };
 
+struct delayed_slow_work {
+	struct slow_work	work;
+	struct timer_list	timer;
+};
+
 /**
  * slow_work_init - Initialise a slow work item
  * @work: The work item to initialise
@@ -72,6 +79,20 @@ static inline void slow_work_init(struct slow_work *work,
 }
 
 /**
+ * delayed_slow_work_init - Initialise a delayed slow work item
+ * @dwork: The delayed work item to initialise
+ * @ops: The operations to use to handle the slow work item
+ *
+ * Initialise a delayed slow work item.
+ */
+static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
+					  const struct slow_work_ops *ops)
+{
+	init_timer(&dwork->timer);
+	slow_work_init(&dwork->work, ops);
+}
+
+/**
  * vslow_work_init - Initialise a very slow work item
  * @work: The work item to initialise
  * @ops: The operations to use to handle the slow work item
@@ -93,6 +114,14 @@ extern void slow_work_cancel(struct slow_work *work);
 extern int slow_work_register_user(struct module *owner);
 extern void slow_work_unregister_user(struct module *owner);
 
+extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
+				     unsigned long delay);
+
+static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
+{
+	slow_work_cancel(&dwork->work);
+}
+
 #ifdef CONFIG_SYSCTL
 extern ctl_table slow_work_sysctls[];
 #endif
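
Since struct delayed_slow_work merely pairs the existing struct slow_work
with a timer, the cancel helper above can simply reuse slow_work_cancel() on
the embedded member. Callers typically embed the delayed item in a larger
object and recover that object in their ->execute() hook with container_of();
a brief sketch, where struct my_record is invented for illustration:

	struct my_record {
		int payload;
		struct delayed_slow_work dwork;
	};

	static void my_record_execute(struct slow_work *work)
	{
		struct my_record *rec =
			container_of(work, struct my_record, dwork.work);

		pr_info("processing payload %d\n", rec->payload);
	}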
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 671cc434532a..f67e1daae93d 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -406,11 +406,40 @@ void slow_work_cancel(struct slow_work *work)
 	bool wait = true, put = false;
 
 	set_bit(SLOW_WORK_CANCELLING, &work->flags);
+	smp_mb();
+
+	/* if the work item is a delayed work item with an active timer, we
+	 * need to wait for the timer to finish _before_ getting the spinlock,
+	 * lest we deadlock against the timer routine
+	 *
+	 * the timer routine will leave DELAYED set if it notices the
+	 * CANCELLING flag in time
+	 */
+	if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
+		struct delayed_slow_work *dwork =
+			container_of(work, struct delayed_slow_work, work);
+		del_timer_sync(&dwork->timer);
+	}
 
 	spin_lock_irq(&slow_work_queue_lock);
 
-	if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
-	    !list_empty(&work->link)) {
+	if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
+		/* the timer routine aborted or never happened, so we are left
+		 * holding the timer's reference on the item and should just
+		 * drop the pending flag and wait for any ongoing execution to
+		 * finish */
+		struct delayed_slow_work *dwork =
+			container_of(work, struct delayed_slow_work, work);
+
+		BUG_ON(timer_pending(&dwork->timer));
+		BUG_ON(!list_empty(&work->link));
+
+		clear_bit(SLOW_WORK_DELAYED, &work->flags);
+		put = true;
+		clear_bit(SLOW_WORK_PENDING, &work->flags);
+
+	} else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
+		   !list_empty(&work->link)) {
 		/* the link in the pending queue holds a reference on the item
 		 * that we will need to release */
 		list_del_init(&work->link);
@@ -441,6 +470,102 @@ void slow_work_cancel(struct slow_work *work)
 EXPORT_SYMBOL(slow_work_cancel);
 
 /*
+ * Handle expiry of the delay timer, indicating that a delayed slow work item
+ * should now be queued if not cancelled
+ */
+static void delayed_slow_work_timer(unsigned long data)
+{
+	struct slow_work *work = (struct slow_work *) data;
+	unsigned long flags;
+	bool queued = false, put = false;
+
+	spin_lock_irqsave(&slow_work_queue_lock, flags);
+	if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
+		clear_bit(SLOW_WORK_DELAYED, &work->flags);
+
+		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
+			/* we discard the reference the timer was holding in
+			 * favour of the one the executor holds */
+			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
+			put = true;
+		} else {
+			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
+				list_add_tail(&work->link, &vslow_work_queue);
+			else
+				list_add_tail(&work->link, &slow_work_queue);
+			queued = true;
+		}
+	}
+
+	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+	if (put)
+		slow_work_put_ref(work);
+	if (queued)
+		wake_up(&slow_work_thread_wq);
+}
+
+/**
+ * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
+ * @dwork: The delayed work item to queue
+ * @delay: When to start executing the work, in jiffies from now
+ *
+ * This is similar to slow_work_enqueue(), but it adds a delay before the work
+ * is actually queued for processing.
+ *
+ * The item can have delayed processing requested on it whilst it is being
+ * executed.  The delay will begin immediately, and if it expires before the
+ * item finishes executing, the item will be placed back on the queue when it
+ * has done executing.
+ */
+int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
+			      unsigned long delay)
+{
+	struct slow_work *work = &dwork->work;
+	unsigned long flags;
+	int ret;
+
+	if (delay == 0)
+		return slow_work_enqueue(&dwork->work);
+
+	BUG_ON(slow_work_user_count <= 0);
+	BUG_ON(!work);
+	BUG_ON(!work->ops);
+
+	if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+		return -ECANCELED;
+
+	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
+		spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+		if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+			goto cancelled;
+
+		/* the timer holds a reference whilst it is pending */
+		ret = work->ops->get_ref(work);
+		if (ret < 0)
+			goto cant_get_ref;
+
+		if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
+			BUG();
+		dwork->timer.expires = jiffies + delay;
+		dwork->timer.data = (unsigned long) work;
+		dwork->timer.function = delayed_slow_work_timer;
+		add_timer(&dwork->timer);
+
+		spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+	}
+
+	return 0;
+
+cancelled:
+	ret = -ECANCELED;
+cant_get_ref:
+	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(delayed_slow_work_enqueue);
+
+/*
  * Schedule a cull of the thread pool at some time in the near future
  */
 static void slow_work_schedule_cull(void)
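
A closing note on the ordering in slow_work_cancel() above: del_timer_sync()
must not be called while holding a lock that the timer handler itself takes,
or cancel and handler can deadlock, which is exactly why the patch deletes
the timer before taking slow_work_queue_lock. The rule in miniature, as a
generic sketch with invented my_* names rather than code from the patch:

	#include <linux/spinlock.h>
	#include <linux/timer.h>

	static DEFINE_SPINLOCK(my_lock);
	static struct timer_list my_timer;

	static void my_timer_fn(unsigned long data)
	{
		spin_lock(&my_lock);	/* handler runs in BH context */
		/* ... */
		spin_unlock(&my_lock);
	}

	static void my_cancel(void)
	{
		/* wait for a running handler BEFORE taking the lock it
		 * uses; calling del_timer_sync() under my_lock could spin
		 * forever waiting for a handler that is itself spinning
		 * on my_lock */
		del_timer_sync(&my_timer);

		spin_lock_bh(&my_lock);
		/* ... */
		spin_unlock_bh(&my_lock);
	}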