path: root/block/blk-mq.c
author    Christoph Hellwig <hch@lst.de>    2014-11-24 03:27:23 -0500
committer Jens Axboe <axboe@fb.com>         2014-11-24 10:02:07 -0500
commit    b657d7e632e0bc40e5e231332be39d69b2f1a0bb (patch)
tree      89027f22274a6f9a4075fcd5736f9a41664efbd3 /block/blk-mq.c
parent    5fabcb4c33fe11c7e3afdf805fde26c1a54d0953 (diff)
blk-mq: handle the single queue case in blk_mq_hctx_next_cpu
Don't duplicate the code that handles the case where work is not bound to a specific CPU in the callers; handle it inside blk_mq_hctx_next_cpu instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
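For readability, this is blk_mq_hctx_next_cpu() as it reads with the patch applied, reconstructed from the hunks below. Note one caveat: the cpumask_first() wrap-around line falls in the gap between the first two hunks and is taken from the surrounding mainline source of this era, so treat it as a best-effort reconstruction rather than a verbatim quote.

static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	/* A single hardware queue has no CPU affinity; let the
	 * workqueue code place the work wherever it likes. */
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	/* Multi-queue: round-robin through the hctx's CPU mask,
	 * staying on each CPU for BLK_MQ_CPU_WORK_BATCH schedules. */
	if (--hctx->next_cpu_batch <= 0) {
		int cpu = hctx->next_cpu, next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;

		return cpu;
	}

	return hctx->next_cpu;
}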
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	31
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4347aa2be6ae..27a347fe8f5b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -788,10 +788,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
  */
 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 {
-	int cpu = hctx->next_cpu;
+	if (hctx->queue->nr_hw_queues == 1)
+		return WORK_CPU_UNBOUND;
 
 	if (--hctx->next_cpu_batch <= 0) {
-		int next_cpu;
+		int cpu = hctx->next_cpu, next_cpu;
 
 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
 		if (next_cpu >= nr_cpu_ids)
@@ -799,9 +800,11 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 
 		hctx->next_cpu = next_cpu;
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+
+		return cpu;
 	}
 
-	return cpu;
+	return hctx->next_cpu;
 }
 
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
@@ -820,14 +823,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 		put_cpu();
 	}
 
-	if (hctx->queue->nr_hw_queues == 1)
-		kblockd_schedule_delayed_work(&hctx->run_work, 0);
-	else {
-		unsigned int cpu;
-
-		cpu = blk_mq_hctx_next_cpu(hctx);
-		kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
-	}
+	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+			&hctx->run_work, 0);
 }
 
 void blk_mq_run_queues(struct request_queue *q, bool async)
@@ -919,16 +916,8 @@ static void blk_mq_delay_work_fn(struct work_struct *work)
 
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 {
-	unsigned long tmo = msecs_to_jiffies(msecs);
-
-	if (hctx->queue->nr_hw_queues == 1)
-		kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
-	else {
-		unsigned int cpu;
-
-		cpu = blk_mq_hctx_next_cpu(hctx);
-		kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
-	}
+	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+			&hctx->delay_work, msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
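Why the callers can now call kblockd_schedule_delayed_work_on() unconditionally: in the workqueue API, WORK_CPU_UNBOUND is the "no CPU preference" value, and the unbound helpers are themselves defined in terms of the _on() variants. Paraphrased from include/linux/workqueue.h of this era (not part of this patch; shown only to illustrate the equivalence):

/* The unbound variant is just the _on() variant called with
 * WORK_CPU_UNBOUND, which is why returning WORK_CPU_UNBOUND from
 * blk_mq_hctx_next_cpu() makes the old single-queue branch in the
 * callers redundant. */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}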