author    Linus Torvalds <torvalds@linux-foundation.org>    2011-04-18 16:21:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-04-18 16:21:18 -0400
commit    8a83f33100c691f5a576dba259cc05502dc358f0 (patch)
tree      c10c74bd28026cefdb8caa1362a0ccc30ea695a5 /block/blk-core.c
parent    5d5b1b9f79ebad81215d11e208e9bfa9679a4ddd (diff)
parent    24ecfbe27f65563909b14492afda2f1c21f7c044 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: add blk_run_queue_async
  block: blk_delay_queue() should use kblockd workqueue
  md: fix up raid1/raid10 unplugging.
  md: incorporate new plugging into raid5.
  md: provide generic support for handling unplug callbacks.
  md - remove old plugging code.
  md/dm - remove remains of plug_fn callback.
  md: use new plugging interface for RAID IO.
  block: drop queue lock before calling __blk_run_queue() for kblockd punt
  Revert "block: add callback function for unplug notification"
  block: Enhance new plugging support to support general callbacks
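For orientation, the plugging machinery this merge extends is driven by an on-stack struct blk_plug: a submitter brackets its I/O with blk_start_plug()/blk_finish_plug(), requests collect on the plug, and they are flushed when the plug is finished or the task schedules. Below is a minimal sketch of such a submitter, assuming the 2.6.39-era submit_bio(rw, bio) signature; submit_read_batch() is a hypothetical helper, not code from this commit.

#include <linux/blkdev.h>
#include <linux/fs.h>

/* Hypothetical helper: batch a set of read bios under one plug. */
static void submit_read_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* current->plug now points at the on-stack plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* typically queued/merged on plug->list, not dispatched yet */
	blk_finish_plug(&plug);		/* flushes via blk_flush_plug_list(&plug, false) */
}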
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  83
1 file changed, 65 insertions(+), 18 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 78b7b0cb7216..5fa3dd2705c6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -204,7 +204,7 @@ static void blk_delay_work(struct work_struct *work)
 
 	q = container_of(work, struct request_queue, delay_work.work);
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irq(q->queue_lock);
 }
 
@@ -220,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-	schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+	queue_delayed_work(kblockd_workqueue, &q->delay_work,
+				msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
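The hunk above only changes which workqueue backs q->delay_work; the rerun it arms is performed by blk_delay_work(), which now runs on kblockd. A rough illustration of the typical caller, not code from this tree (example_request_fn(), example_hw_can_accept() and example_hw_queue() are made up): a ->request_fn that hits a temporary resource shortage requeues the request and asks for the queue to be rerun a few milliseconds later.

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* ->request_fn is entered with q->queue_lock held */
	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!example_hw_can_accept(rq)) {	/* hypothetical check */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 3);		/* rerun the queue in ~3 ms */
			break;
		}
		example_hw_queue(rq);			/* hypothetical dispatch */
	}
}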
@@ -238,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -296,9 +297,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
- *
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
@@ -307,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else
@@ -316,6 +316,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 EXPORT_SYMBOL(__blk_run_queue);
 
 /**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+	if (likely(!blk_queue_stopped(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
+/**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  *
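blk_run_queue_async() above is the piece queue_unplugged() later in this patch uses for the kblockd punt. As a rough usage sketch (struct example_dev and example_done_irq() are hypothetical, not from this tree), a driver path that would rather not invoke ->request_fn synchronously, for stack-depth or lock-hold-time reasons, can simply arm q->delay_work with a zero delay:

static void example_done_irq(struct example_dev *dev)	/* hypothetical driver type */
{
	/* ... complete the finished command and free its resources ... */

	/*
	 * Let kblockd pull the next requests off the queue instead of
	 * running ->request_fn from this completion context.
	 */
	blk_run_queue_async(dev->queue);
}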
@@ -328,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -977,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_queue_end_tag(q, rq);
 
 	add_acct_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1321,7 +1335,7 @@ get_rq:
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
@@ -2638,6 +2652,7 @@ void blk_start_plug(struct blk_plug *plug)
 
 	plug->magic = PLUG_MAGIC;
 	INIT_LIST_HEAD(&plug->list);
+	INIT_LIST_HEAD(&plug->cb_list);
 	plug->should_sort = 0;
 
 	/*
@@ -2670,12 +2685,41 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
 			    bool from_schedule)
+	__releases(q->queue_lock)
 {
 	trace_block_unplug(q, depth, !from_schedule);
-	__blk_run_queue(q, from_schedule);
 
-	if (q->unplugged_fn)
-		q->unplugged_fn(q);
+	/*
+	 * If we are punting this to kblockd, then we can safely drop
+	 * the queue_lock before waking kblockd (which needs to take
+	 * this lock).
+	 */
+	if (from_schedule) {
+		spin_unlock(q->queue_lock);
+		blk_run_queue_async(q);
+	} else {
+		__blk_run_queue(q);
+		spin_unlock(q->queue_lock);
+	}
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+	LIST_HEAD(callbacks);
+
+	if (list_empty(&plug->cb_list))
+		return;
+
+	list_splice_init(&plug->cb_list, &callbacks);
+
+	while (!list_empty(&callbacks)) {
+		struct blk_plug_cb *cb = list_first_entry(&callbacks,
+							  struct blk_plug_cb,
+							  list);
+		list_del(&cb->list);
+		cb->callback(cb);
+	}
 }
 
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
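flush_plug_callbacks() above is what makes the new cb_list useful to stacking drivers: anything hung off the current plug's cb_list has its callback invoked when the plug is flushed, before the per-queue request lists are dispatched. A rough sketch of registering such a callback, in the style md adopts in this merge (struct example_plug_cb, struct example_dev, example_unplug() and example_kick() are hypothetical; struct blk_plug_cb is assumed to be the list_head-plus-callback pair this series introduces):

#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct example_plug_cb {
	struct blk_plug_cb cb;		/* list + callback, see blkdev.h */
	struct example_dev *dev;
};

static void example_unplug(struct blk_plug_cb *cb)
{
	struct example_plug_cb *ecb = container_of(cb, struct example_plug_cb, cb);

	example_kick(ecb->dev);		/* hypothetical: restart work deferred while plugged */
	kfree(ecb);
}

static void example_defer_until_unplug(struct example_dev *dev)
{
	struct blk_plug *plug = current->plug;
	struct example_plug_cb *ecb;

	if (!plug)
		return;			/* no plug active, nothing to hook */

	ecb = kmalloc(sizeof(*ecb), GFP_ATOMIC);
	if (!ecb)
		return;

	ecb->cb.callback = example_unplug;
	ecb->dev = dev;
	list_add(&ecb->cb.list, &plug->cb_list);
}

A real user would also avoid adding the same callback twice within one plug window, as md's implementation in this merge does.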
@@ -2688,6 +2732,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 	BUG_ON(plug->magic != PLUG_MAGIC);
 
+	flush_plug_callbacks(plug);
 	if (list_empty(&plug->list))
 		return;
 
@@ -2712,10 +2757,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
-			if (q) {
+			/*
+			 * This drops the queue lock
+			 */
+			if (q)
 				queue_unplugged(q, depth, from_schedule);
-				spin_unlock(q->queue_lock);
-			}
 			q = rq->q;
 			depth = 0;
 			spin_lock(q->queue_lock);
@@ -2733,10 +2779,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		depth++;
 	}
 
-	if (q) {
+	/*
+	 * This drops the queue lock
+	 */
+	if (q)
 		queue_unplugged(q, depth, from_schedule);
-		spin_unlock(q->queue_lock);
-	}
 
 	local_irq_restore(flags);
 }