author	David Howells <dhowells@redhat.com>	2009-11-19 13:10:57 -0500
committer	David Howells <dhowells@redhat.com>	2009-11-19 13:10:57 -0500
commit	3bde31a4ac225cb5805be02eff6eaaf7e0766ccd (patch)
tree	9fb757ab7d46e0c37fb5e88d3185f1861fbc794e /kernel/slow-work.c
parent	31ba99d304494cb28fa8671ccc769c5543e1165d (diff)
SLOW_WORK: Allow a requeueable work item to sleep till the thread is needed
Add a function to allow a requeueable work item to sleep till the thread processing it is needed by the slow-work facility to perform other work.

Sometimes a work item can't progress immediately, but must wait for the completion of another work item that's currently being processed by another slow-work thread.

In some circumstances, the waiting item could instead - theoretically - put itself back on the queue and yield its thread back to the slow-work facility, thus waiting till it gets processing time again before attempting to progress. This would allow other work items processing time on that thread.

However, this only works if there is something on the queue for it to queue behind - otherwise it will just get a thread again immediately, and will end up cycling between the queue and the thread, eating up valuable CPU time.

So, slow_work_sleep_till_thread_needed() is provided such that an item can put itself on a wait queue that will wake it up when the event it is actually interested in occurs, then call this function in lieu of calling schedule().

This function will then sleep until either the item's event occurs or another work item appears on the queue. If another work item is queued, but the item's event hasn't occurred, then the work item should requeue itself and yield the thread back to the slow-work facility by returning.

This can be used by CacheFiles for an object that is being created on one thread to wait for an object being deleted on another thread where there is nothing on the queue for the creation to go and wait behind. As soon as an item appears on the queue that could be given thread time instead, CacheFiles can stick the creating object back on the queue and return to the slow-work facility - assuming the object deletion didn't also complete.

Signed-off-by: David Howells <dhowells@redhat.com>
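For context, the calling pattern described above might look roughly like the sketch below. This is not part of the patch: my_execute(), my_event_wq and my_event_done are hypothetical stand-ins for a caller's own event machinery (the real user is the CacheFiles/FS-Cache object code), shown only to illustrate how an execute() callback would combine its own wait queue with slow_work_sleep_till_thread_needed().

/* Illustrative sketch only -- not part of this patch.  my_execute(),
 * my_event_wq and my_event_done are hypothetical names. */
#include <linux/sched.h>
#include <linux/slow-work.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_event_wq);	/* hypothetical event wait queue */
static bool my_event_done;			/* hypothetical "other item finished" flag */

static void my_execute(struct slow_work *work)
{
	signed long timeout = MAX_SCHEDULE_TIMEOUT;
	DEFINE_WAIT(wait);
	bool requeue = false;

	for (;;) {
		/* set up the wake-up event and the sleep mode first... */
		prepare_to_wait(&my_event_wq, &wait, TASK_UNINTERRUPTIBLE);

		/* ...and test our own condition, as no test is made for us */
		if (my_event_done)
			break;

		/* sleep till our event occurs or the queue gains an item */
		requeue = slow_work_sleep_till_thread_needed(work, &timeout);
		if (requeue)
			break;
	}
	finish_wait(&my_event_wq, &wait);

	if (requeue && !my_event_done) {
		/* another item could use this thread: put ourselves back on
		 * the queue and yield the thread by returning */
		slow_work_enqueue(work);
		return;
	}

	/* our event occurred; carry on with the actual work here */
}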
Diffstat (limited to 'kernel/slow-work.c')
-rw-r--r--	kernel/slow-work.c	94
1 file changed, 85 insertions(+), 9 deletions(-)
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index b763bc2d2670..da94f3c101af 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -133,6 +133,15 @@ LIST_HEAD(vslow_work_queue);
 DEFINE_SPINLOCK(slow_work_queue_lock);
 
 /*
+ * The following are two wait queues that get pinged when a work item is placed
+ * on an empty queue.  These allow work items that are hogging a thread by
+ * sleeping in a way that could be deferred to yield their thread and enqueue
+ * themselves.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
+static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
+
+/*
  * The thread controls.  A variable used to signal to the threads that they
  * should exit when the queue is empty, a waitqueue used by the threads to wait
  * for signals, and a completion set by the last thread to exit.
@@ -306,6 +315,50 @@ auto_requeue:
 }
 
 /**
+ * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
+ * work: The work item under execution that wants to sleep
+ * _timeout: Scheduler sleep timeout
+ *
+ * Allow a requeueable work item to sleep on a slow-work processor thread until
+ * that thread is needed to do some other work or the sleep is interrupted by
+ * some other event.
+ *
+ * The caller must set up a wake up event before calling this and must have set
+ * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
+ * condition before calling this function as no test is made here.
+ *
+ * False is returned if there is nothing on the queue; true is returned if the
+ * work item should be requeued
+ */
+bool slow_work_sleep_till_thread_needed(struct slow_work *work,
+					signed long *_timeout)
+{
+	wait_queue_head_t *wfo_wq;
+	struct list_head *queue;
+
+	DEFINE_WAIT(wait);
+
+	if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+		wfo_wq = &vslow_work_queue_waits_for_occupation;
+		queue = &vslow_work_queue;
+	} else {
+		wfo_wq = &slow_work_queue_waits_for_occupation;
+		queue = &slow_work_queue;
+	}
+
+	if (!list_empty(queue))
+		return true;
+
+	add_wait_queue_exclusive(wfo_wq, &wait);
+	if (list_empty(queue))
+		*_timeout = schedule_timeout(*_timeout);
+	finish_wait(wfo_wq, &wait);
+
+	return !list_empty(queue);
+}
+EXPORT_SYMBOL(slow_work_sleep_till_thread_needed);
+
+/**
  * slow_work_enqueue - Schedule a slow work item for processing
  * @work: The work item to queue
  *
@@ -335,6 +388,8 @@ auto_requeue:
  */
 int slow_work_enqueue(struct slow_work *work)
 {
+	wait_queue_head_t *wfo_wq;
+	struct list_head *queue;
 	unsigned long flags;
 	int ret;
 
@@ -354,6 +409,14 @@ int slow_work_enqueue(struct slow_work *work)
 	 * maintaining our promise
 	 */
 	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
+		if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+			wfo_wq = &vslow_work_queue_waits_for_occupation;
+			queue = &vslow_work_queue;
+		} else {
+			wfo_wq = &slow_work_queue_waits_for_occupation;
+			queue = &slow_work_queue;
+		}
+
 		spin_lock_irqsave(&slow_work_queue_lock, flags);
 
 		if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
@@ -380,11 +443,13 @@ int slow_work_enqueue(struct slow_work *work)
 		if (ret < 0)
 			goto failed;
 		slow_work_mark_time(work);
-		if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
-			list_add_tail(&work->link, &vslow_work_queue);
-		else
-			list_add_tail(&work->link, &slow_work_queue);
+		list_add_tail(&work->link, queue);
 		wake_up(&slow_work_thread_wq);
+
+		/* if someone who could be requeued is sleeping on a
+		 * thread, then ask them to yield their thread */
+		if (work->link.prev == queue)
+			wake_up(wfo_wq);
 	}
 
 	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
@@ -487,9 +552,19 @@ EXPORT_SYMBOL(slow_work_cancel);
  */
 static void delayed_slow_work_timer(unsigned long data)
 {
+	wait_queue_head_t *wfo_wq;
+	struct list_head *queue;
 	struct slow_work *work = (struct slow_work *) data;
 	unsigned long flags;
-	bool queued = false, put = false;
+	bool queued = false, put = false, first = false;
+
+	if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+		wfo_wq = &vslow_work_queue_waits_for_occupation;
+		queue = &vslow_work_queue;
+	} else {
+		wfo_wq = &slow_work_queue_waits_for_occupation;
+		queue = &slow_work_queue;
+	}
 
 	spin_lock_irqsave(&slow_work_queue_lock, flags);
 	if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
@@ -502,17 +577,18 @@ static void delayed_slow_work_timer(unsigned long data)
 			put = true;
 		} else {
 			slow_work_mark_time(work);
-			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
-				list_add_tail(&work->link, &vslow_work_queue);
-			else
-				list_add_tail(&work->link, &slow_work_queue);
+			list_add_tail(&work->link, queue);
 			queued = true;
+			if (work->link.prev == queue)
+				first = true;
 		}
 	}
 
 	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
 	if (put)
 		slow_work_put_ref(work);
+	if (first)
+		wake_up(wfo_wq);
 	if (queued)
 		wake_up(&slow_work_thread_wq);
 }