author    Jens Axboe <jaxboe@fusionio.com>  2011-04-18 03:59:55 -0400
committer Jens Axboe <jaxboe@fusionio.com>  2011-04-18 03:59:55 -0400
commit    99e22598e9a8e0a996d69c8c0f6b7027cb57720a (patch)
tree      9cf18bc4681889bdfcbc0c845e384f809fb29fce
parent    b4cb290e0a7d19235bd075c2ad4d60dbab0bac15 (diff)
block: drop queue lock before calling __blk_run_queue() for kblockd punt
If we know we are going to punt to kblockd, we can drop the queue lock
before calling into __blk_run_queue(), since it only does a safe bit test
and a workqueue call. Since kblockd needs to grab this very lock as one of
the first things it does, it's a good optimization to drop the lock before
waking kblockd.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
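For reference, a minimal sketch of what the punted path amounts to in blk-core.c of this era: with force_kblockd set, __blk_run_queue() only tests the queue's stopped flag and schedules delayed work on kblockd, neither of which needs the queue lock. The body below is recalled from the surrounding code rather than taken from this patch, so treat the flag and workqueue names (QUEUE_FLAG_REENTER, kblockd_workqueue, q->delay_work) as assumptions:

void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
	/* The "safe bit test" from the changelog: no queue_lock needed. */
	if (unlikely(blk_queue_stopped(q)))
		return;

	/*
	 * Calling ->request_fn() directly is the path that relies on the
	 * caller holding queue_lock; the kblockd punt just schedules work.
	 */
	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}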
-rw-r--r--  block/blk-core.c  |  33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 09b262811fff..5e413933bc3a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -295,7 +295,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    held and interrupts disabled. If force_kblockd is true, then it is
+ *    safe to call this without holding the queue lock.
  *
  */
 void __blk_run_queue(struct request_queue *q, bool force_kblockd)
@@ -2671,9 +2672,23 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
 			    bool from_schedule)
+	__releases(q->queue_lock)
 {
 	trace_block_unplug(q, depth, !from_schedule);
-	__blk_run_queue(q, from_schedule);
+
+	/*
+	 * If we are punting this to kblockd, then we can safely drop
+	 * the queue_lock before waking kblockd (which needs to take
+	 * this lock).
+	 */
+	if (from_schedule) {
+		spin_unlock(q->queue_lock);
+		__blk_run_queue(q, true);
+	} else {
+		__blk_run_queue(q, false);
+		spin_unlock(q->queue_lock);
+	}
+
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug)
@@ -2729,10 +2744,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
-			if (q) {
+			/*
+			 * This drops the queue lock
+			 */
+			if (q)
 				queue_unplugged(q, depth, from_schedule);
-				spin_unlock(q->queue_lock);
-			}
 			q = rq->q;
 			depth = 0;
 			spin_lock(q->queue_lock);
@@ -2750,10 +2766,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		depth++;
 	}
 
-	if (q) {
+	/*
+	 * This drops the queue lock
+	 */
+	if (q)
 		queue_unplugged(q, depth, from_schedule);
-		spin_unlock(q->queue_lock);
-	}
 
 	local_irq_restore(flags);
 }
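For context on the kblockd side: the work item scheduled by the punt takes the queue lock as its very first action before re-running the queue, so a caller that wakes kblockd while still holding queue_lock only sets up immediate contention on that lock. A rough sketch of that handler as it looked around this kernel (the blk_delay_work name and the delay_work field are recalled from the era's blk-core.c, not part of this diff):

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);

	/* First thing kblockd does is grab the very lock the caller held. */
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q, false);
	spin_unlock_irq(q->queue_lock);
}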