author     Mike Snitzer <snitzer@redhat.com>    2015-03-31 12:00:32 -0400
committer  Mike Snitzer <snitzer@redhat.com>    2015-03-31 12:00:32 -0400
commit     851c9f38e4199adb612366f8e202036d624d5de2
tree       4b9bfae45b5a9cfb324577a838a59de0cfc3f7a5
parent     e9637415a92cf25ad800b7fdeddcd30cce7b44ab
parent     c76cbbcf404475f8885b2252049dac99b0614868
Merge remote-tracking branch 'jens/for-4.1/core' into dm/for-next
-rw-r--r--  block/blk-core.c        | 19
-rw-r--r--  block/blk-mq-sysfs.c    |  1
-rw-r--r--  block/blk-mq.c          | 54
-rw-r--r--  include/linux/blk-mq.h  |  3
4 files changed, 52 insertions(+), 25 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 794c3e7f01cf..fd154b94447a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -557,6 +557,18 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+/* Allocate memory local to the request queue */
+static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+{
+	int nid = (int)(long)data;
+	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+}
+
+static void free_request_struct(void *element, void *unused)
+{
+	kmem_cache_free(request_cachep, element);
+}
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask)
 {
@@ -569,9 +581,10 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
-	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-					mempool_free_slab, request_cachep,
-					gfp_mask, q->node);
+	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
+					  free_request_struct,
+					  (void *)(long)q->node, gfp_mask,
+					  q->node);
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
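Note: the blk-core.c hunks above swap the generic mempool_alloc_slab/mempool_free_slab helpers for callbacks that allocate requests on the queue's NUMA node, passing the node id through mempool_create_node()'s pool_data argument. A minimal sketch of the same pattern, using a hypothetical my_cache slab cache rather than the kernel's request_cachep:

#include <linux/mempool.h>
#include <linux/slab.h>

/* Sketch only: my_cache is a hypothetical slab cache, not request_cachep. */
static struct kmem_cache *my_cache;

/* mempool_alloc_t callback: allocate on the node passed via pool_data. */
static void *my_alloc_node(gfp_t gfp_mask, void *pool_data)
{
	int nid = (int)(long)pool_data;

	return kmem_cache_alloc_node(my_cache, gfp_mask, nid);
}

/* mempool_free_t callback: the second argument (pool_data) is unused here. */
static void my_free_node(void *element, void *pool_data)
{
	kmem_cache_free(my_cache, element);
}

static mempool_t *my_create_local_pool(int min_nr, int nid)
{
	/* pool_data carries the node id; the pool itself also lives on nid. */
	return mempool_create_node(min_nr, my_alloc_node, my_free_node,
				   (void *)(long)nid, GFP_KERNEL, nid);
}

Smuggling the node id through pool_data is what lets the allocation callback stay NUMA-aware without any extra per-pool state.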
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 1630a20d5dcf..b79685e06b70 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -436,6 +436,7 @@ int blk_mq_register_disk(struct gendisk *disk)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_mq_register_disk);
 
 void blk_mq_sysfs_unregister(struct request_queue *q)
 {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b7b8933ec241..1192f85e5ff3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,7 +33,6 @@ static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static void blk_mq_run_queues(struct request_queue *q);
 
 /*
  * Check if any of the ctx's have pending work in this hardware queue
@@ -78,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 {
 	while (true) {
 		int ret;
@@ -86,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
 			return 0;
 
+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
 		ret = wait_event_interruptible(q->mq_freeze_wq,
 				!q->mq_freeze_depth || blk_queue_dying(q));
 		if (blk_queue_dying(q))
@@ -118,7 +120,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
 
 	if (freeze) {
 		percpu_ref_kill(&q->mq_usage_counter);
-		blk_mq_run_queues(q);
+		blk_mq_run_hw_queues(q, false);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -257,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_alloc_data alloc_data;
 	int ret;
 
-	ret = blk_mq_queue_enter(q);
+	ret = blk_mq_queue_enter(q, gfp);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -904,7 +906,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 			&hctx->run_work, 0);
 }
 
-static void blk_mq_run_queues(struct request_queue *q)
+void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
@@ -915,9 +917,10 @@ static void blk_mq_run_queues(struct request_queue *q)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;
 
-		blk_mq_run_hw_queue(hctx, false);
+		blk_mq_run_hw_queue(hctx, async);
 	}
 }
+EXPORT_SYMBOL(blk_mq_run_hw_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -1186,7 +1189,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	int rw = bio_data_dir(bio);
 	struct blk_mq_alloc_data alloc_data;
 
-	if (unlikely(blk_mq_queue_enter(q))) {
+	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
 		bio_endio(bio, -EIO);
 		return NULL;
 	}
@@ -1891,9 +1894,25 @@ void blk_mq_release(struct request_queue *q)
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
+	struct request_queue *uninit_q, *q;
+
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+	if (!uninit_q)
+		return ERR_PTR(-ENOMEM);
+
+	q = blk_mq_init_allocated_queue(set, uninit_q);
+	if (IS_ERR(q))
+		blk_cleanup_queue(uninit_q);
+
+	return q;
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+						  struct request_queue *q)
+{
 	struct blk_mq_hw_ctx **hctxs;
 	struct blk_mq_ctx __percpu *ctx;
-	struct request_queue *q;
 	unsigned int *map;
 	int i;
 
@@ -1928,20 +1947,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 		hctxs[i]->queue_num = i;
 	}
 
-	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
-	if (!q)
-		goto err_hctxs;
-
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 	 * See blk_register_queue() for details.
 	 */
 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_mq_usage;
+		goto err_hctxs;
 
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
-	blk_queue_rq_timeout(q, 30000);
+	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
 
 	q->nr_queues = nr_cpu_ids;
 	q->nr_hw_queues = set->nr_hw_queues;
@@ -1967,9 +1982,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	else
 		blk_queue_make_request(q, blk_sq_make_request);
 
-	if (set->timeout)
-		blk_queue_rq_timeout(q, set->timeout);
-
 	/*
 	 * Do this after blk_queue_make_request() overrides it...
 	 */
@@ -1981,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_mq_usage;
+		goto err_hctxs;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,8 +2005,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	return q;
 
-err_mq_usage:
-	blk_cleanup_queue(q);
 err_hctxs:
 	kfree(map);
 	for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2009,7 +2019,7 @@ err_percpu:
 	free_percpu(ctx);
 	return ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 
 void blk_mq_free_queue(struct request_queue *q)
 {
@@ -2161,7 +2171,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
+	if (!set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
 
 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
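Note: the blk_mq_init_queue()/blk_mq_init_allocated_queue() split above lets a driver allocate the bare request_queue itself and only hand it to blk-mq once it is ready, which is the pattern a stacking driver such as device-mapper needs. A hedged caller-side sketch; my_create_queue, my_tag_set and my_node are hypothetical driver names, not kernel symbols:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

static struct request_queue *my_create_queue(struct blk_mq_tag_set *my_tag_set,
					     int my_node)
{
	struct request_queue *uninit_q, *q;

	/* The driver now owns allocation and can prep the bare queue first. */
	uninit_q = blk_alloc_queue_node(GFP_KERNEL, my_node);
	if (!uninit_q)
		return ERR_PTR(-ENOMEM);

	/* ... driver-specific setup of uninit_q could happen here ... */

	/* Hand the pre-allocated queue to blk-mq; clean up on failure. */
	q = blk_mq_init_allocated_queue(my_tag_set, uninit_q);
	if (IS_ERR(q))
		blk_cleanup_queue(uninit_q);

	return q;
}

This mirrors what the reworked blk_mq_init_queue() itself now does; callers that do not need the intermediate step keep using blk_mq_init_queue() unchanged.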
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 7aec86127335..ebfe707cf722 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -164,6 +164,8 @@ enum {
 		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+		struct request_queue *q);
 void blk_mq_finish_init(struct request_queue *q);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
@@ -218,6 +220,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
 		void *priv);
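Note: with the gfp-aware blk_mq_queue_enter() change in blk-mq.c above, a request allocation that cannot sleep now backs off with -EBUSY while the queue is frozen instead of waiting on mq_freeze_wq. A rough caller-side sketch; my_try_issue is hypothetical, and the blk_mq_alloc_request() call assumes the four-argument form (q, rw, gfp, reserved) whose header line is truncated in the hunk above:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

static int my_try_issue(struct request_queue *q)
{
	struct request *rq;

	/*
	 * GFP_ATOMIC has no __GFP_WAIT, so a freezing queue now makes
	 * blk_mq_queue_enter() return -EBUSY instead of sleeping.
	 */
	rq = blk_mq_alloc_request(q, READ, GFP_ATOMIC, false);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* e.g. -EBUSY or -EWOULDBLOCK */

	/* ... fill in and issue the request here ... */

	blk_mq_free_request(rq);
	return 0;
}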