path: root/block
author    Paolo Bonzini <pbonzini@redhat.com>    2014-11-07 17:03:59 -0500
committer Jens Axboe <axboe@fb.com>              2014-11-11 13:04:47 -0500
commit    398205b8391b208f0034a392242867b28ad8af3d (patch)
tree      2b7ce3ddc4029a15f7efbe4bb1e5711b9607dfc3 /block
parent    9c6ac78eb3521c5937b2dd8a7d1b300f41092f45 (diff)
blk_mq: call preempt_disable/enable in blk_mq_run_hw_queue, and only if needed
preempt_disable/enable surrounds every call to blk_mq_run_hw_queue, except the one in blk-flush.c. In fact that one is always asynchronous, and it does not need smp_processor_id().

We can do the same for all other calls, avoiding preempt_disable when async is true. This avoids peppering blk-mq.c with preemption-disabled regions.

Cc: Jens Axboe <axboe@kernel.dk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Reported-by: Clark Williams <williams@redhat.com>
Tested-by: Clark Williams <williams@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
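For context on why the bracketing is needed at all: smp_processor_id() is only meaningful while preemption is disabled, since otherwise the task can migrate to another CPU immediately after the read, and kernels built with CONFIG_DEBUG_PREEMPT warn when it is called from preemptible context. The asynchronous path never reads the CPU id, so it needs no bracketing. A minimal sketch of the pattern the patch adopts inside block/blk-mq.c (illustrative only; run_sync_if_local is a made-up helper name, not part of the patch):

/*
 * Illustrative sketch, not from the patch: the synchronous path must
 * wrap the CPU-mask test in preempt_disable()/preempt_enable(),
 * because smp_processor_id() is only stable while preemption is off.
 */
static bool run_sync_if_local(struct blk_mq_hw_ctx *hctx)
{
	bool ran = false;

	preempt_disable();	/* pin the task to the current CPU */
	if (cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) {
		__blk_mq_run_hw_queue(hctx);	/* still on a mapped CPU */
		ran = true;
	}
	preempt_enable();
	return ran;	/* false: caller falls back to async dispatch */
}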
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c  21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b355b5957cd7..8b309e81ed0f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -801,9 +801,18 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 		return;
 
-	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
-		__blk_mq_run_hw_queue(hctx);
-	else if (hctx->queue->nr_hw_queues == 1)
+	if (!async) {
+		preempt_disable();
+		if (cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) {
+			__blk_mq_run_hw_queue(hctx);
+			preempt_enable();
+			return;
+		}
+
+		preempt_enable();
+	}
+
+	if (hctx->queue->nr_hw_queues == 1)
 		kblockd_schedule_delayed_work(&hctx->run_work, 0);
 	else {
 		unsigned int cpu;
@@ -824,9 +833,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;
 
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_run_queues);
@@ -853,9 +860,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-	preempt_disable();
 	blk_mq_run_hw_queue(hctx, false);
-	preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
 
@@ -880,9 +885,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 			continue;
 
 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
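Pieced together from the first hunk, the entry point after this patch reads roughly as below. This is a readability sketch assembled from the diff, not the verbatim upstream source; the multi-queue "else" arm that picks a CPU from hctx->cpumask is elided.

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return;

	if (!async) {
		preempt_disable();
		if (cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) {
			/* Caller's CPU is mapped to this hctx: run inline. */
			__blk_mq_run_hw_queue(hctx);
			preempt_enable();
			return;
		}
		preempt_enable();
		/* Not on a mapped CPU: fall through to async dispatch. */
	}

	if (hctx->queue->nr_hw_queues == 1)
		kblockd_schedule_delayed_work(&hctx->run_work, 0);
	/* else: kick hctx->run_work on a CPU from hctx->cpumask (elided) */
}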