path: root/block/blk-mq.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2015-02-12 17:13:23 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-02-12 17:13:23 -0500
commit    3e12cefbe143b4947171ff92dd50024c4841e291 (patch)
tree      f58ec23a4092576ed08843cca5f5443a32106bd1 /block/blk-mq.c
parent    6bec0035286119eefc32a5b1102127e6a4032cb2 (diff)
parent    d427e3c82ef4fc5fbb22c0cef0b040e6767b1028 (diff)
Merge branch 'for-3.20/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
 "This contains:

   - A series from Christoph that cleans up and refactors various parts
     of the REQ_BLOCK_PC handling.  Contributions in that series from
     Dongsu Park and Kent Overstreet as well.

   - CFQ:
       - A bug fix for cfq for realtime IO scheduling from Jeff Moyer.
       - A stable patch fixing a potential crash in CFQ in OOM
         situations.  From Konstantin Khlebnikov.

   - blk-mq:
       - Add support for tag allocation policies, from Shaohua.  This is
         a prep patch enabling libata (and other SCSI parts) to use the
         blk-mq tagging, instead of rolling their own.
       - Various little tweaks from Keith and Mike, in preparation for
         DM blk-mq support.
       - Minor little fixes or tweaks from me.

   - A double free error fix from Tony Battersby.

   - The partition 4k issue fixes from Matthew and Boaz.

   - Add support for zero+unprovision for blkdev_issue_zeroout() from
     Martin"

* 'for-3.20/core' of git://git.kernel.dk/linux-block: (27 commits)
  block: remove unused function blk_bio_map_sg
  block: handle the null_mapped flag correctly in blk_rq_map_user_iov
  blk-mq: fix double-free in error path
  block: prevent request-to-request merging with gaps if not allowed
  blk-mq: make blk_mq_run_queues() static
  dm: fix multipath regression due to initializing wrong request
  cfq-iosched: handle failure of cfq group allocation
  block: Quiesce zeroout wrapper
  block: rewrite and split __bio_copy_iov()
  block: merge __bio_map_user_iov into bio_map_user_iov
  block: merge __bio_map_kern into bio_map_kern
  block: pass iov_iter to the BLOCK_PC mapping functions
  block: add a helper to free bio bounce buffer pages
  block: use blk_rq_map_user_iov to implement blk_rq_map_user
  block: simplify bio_map_kern
  block: mark blk-mq devices as stackable
  block: keep established cmd_flags when cloning into a blk-mq request
  block: add blk-mq support to blk_insert_cloned_request()
  block: require blk_rq_prep_clone() be given an initialized clone request
  blk-mq: add tag allocation policy
  ...
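Context for the "blk-mq: add tag allocation policy" item above (this note is not part of the commit text): the policy is carried in the tag set's flags and decoded in blk_mq_init_rq_map(), as the last hunk of the diff below shows. What follows is a minimal, hedged sketch of how a driver might encode a round-robin policy into its blk_mq_tag_set, assuming the constants and macros introduced by that series (BLK_TAG_ALLOC_RR, BLK_ALLOC_POLICY_TO_MQ_FLAG); all example_* identifiers are hypothetical.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/numa.h>

/* Hypothetical, empty queue ops; a real driver fills in .queue_rq etc. */
static struct blk_mq_ops example_mq_ops;

/* Hypothetical driver tag set. */
static struct blk_mq_tag_set example_tag_set;

static int example_init_tag_set(void)
{
        example_tag_set.ops          = &example_mq_ops;
        example_tag_set.nr_hw_queues = 1;
        example_tag_set.queue_depth  = 64;
        example_tag_set.numa_node    = NUMA_NO_NODE;
        example_tag_set.cmd_size     = 0;
        /*
         * Encode the tag allocation policy into the flags;
         * blk_mq_init_rq_map() later recovers it with
         * BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags), as in the diff below.
         */
        example_tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
                                BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);

        return blk_mq_alloc_tag_set(&example_tag_set);
}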
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--    block/blk-mq.c    13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2390c5541e71..4f4bea21052e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,6 +33,7 @@ static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
+static void blk_mq_run_queues(struct request_queue *q);
 
 /*
  * Check if any of the ctx's have pending work in this hardware queue
@@ -117,7 +118,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
 
 	if (freeze) {
 		percpu_ref_kill(&q->mq_usage_counter);
-		blk_mq_run_queues(q, false);
+		blk_mq_run_queues(q);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -136,6 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
 	blk_mq_freeze_queue_start(q);
 	blk_mq_freeze_queue_wait(q);
 }
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
@@ -902,7 +904,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 			&hctx->run_work, 0);
 }
 
-void blk_mq_run_queues(struct request_queue *q, bool async)
+static void blk_mq_run_queues(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
@@ -913,10 +915,9 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;
 
-		blk_mq_run_hw_queue(hctx, async);
+		blk_mq_run_hw_queue(hctx, false);
 	}
 }
-EXPORT_SYMBOL(blk_mq_run_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -954,7 +955,6 @@ void blk_mq_start_hw_queues(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queues);
 
-
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx;
@@ -1423,7 +1423,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 	size_t rq_size, left;
 
 	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
-				set->numa_node);
+				set->numa_node,
+				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
 	if (!tags)
 		return NULL;
 
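A note on the EXPORT_SYMBOL_GPL(blk_mq_freeze_queue) hunk above (this note is not part of the diff): exporting the symbol lets modules, such as the DM work mentioned in the pull message, drain a blk-mq queue around reconfiguration. Below is a minimal hedged sketch assuming only the exported blk_mq_freeze_queue()/blk_mq_unfreeze_queue() pair; the helper name and the reconfiguration step are placeholders.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical helper: quiesce a blk-mq queue before touching its state. */
static void example_reconfigure_queue(struct request_queue *q)
{
        /* Kills q->mq_usage_counter and waits for in-flight requests to finish. */
        blk_mq_freeze_queue(q);

        /* ... queue state may be changed here with no requests in flight ... */

        /* Revives the usage counter and wakes anyone waiting to submit new requests. */
        blk_mq_unfreeze_queue(q);
}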