author     Jens Axboe <axboe@fb.com>  2014-09-11 11:31:18 -0400
committer  Jens Axboe <axboe@fb.com>  2014-09-11 11:31:18 -0400
commit     b207892b061da7608878e273ae22ba9bf9be264b (patch)
tree       51daa46b89b83cad422941f52110b19571b85b79 /block
parent     018a17bdc8658ad448497c84d4ba21b6985820ec (diff)
parent     a516440542afcb9647f88d12c35640baf02d07ea (diff)
Merge branch 'for-linus' into for-3.18/core

A bit of churn on the for-linus side that would be nice to have in the
core bits for 3.18, so pull it in to catch us up and make forward
progress easier.

Signed-off-by: Jens Axboe <axboe@fb.com>

Conflicts:
	block/scsi_ioctl.c
Diffstat (limited to 'block')
-rw-r--r--  block/bio-integrity.c        2
-rw-r--r--  block/blk-core.c             1
-rw-r--r--  block/blk-merge.c           17
-rw-r--r--  block/blk-mq.c             127
-rw-r--r--  block/blk-sysfs.c            6
-rw-r--r--  block/cfq-iosched.c         19
-rw-r--r--  block/genhd.c               24
-rw-r--r--  block/partition-generic.c    2
-rw-r--r--  block/scsi_ioctl.c          36
9 files changed, 163 insertions, 71 deletions
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index bc423f7b02da..f14b4abbebd8 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -520,7 +520,7 @@ void bio_integrity_endio(struct bio *bio, int error)
 	 */
 	if (error) {
 		bio->bi_end_io = bip->bip_end_io;
-		bio_endio(bio, error);
+		bio_endio_nodec(bio, error);
 
 		return;
 	}
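Why the _nodec variant: bio_endio() consumes one reference from the bio's
remaining-completion count, and that reference was already consumed when the
integrity completion first ran. Re-raising the completion after restoring the
original bi_end_io therefore has to skip the decrement, or the count is
consumed twice. A minimal userspace sketch of that counting rule, with
illustrative names rather than the kernel API:

#include <stdio.h>

struct toy_bio {
	int remaining;
	void (*end_io)(struct toy_bio *, int error);
};

static void toy_endio(struct toy_bio *bio, int error)
{
	/* consumes one completion reference, like bio_endio() */
	if (--bio->remaining == 0 && bio->end_io)
		bio->end_io(bio, error);
}

static void toy_endio_nodec(struct toy_bio *bio, int error)
{
	/* completes without consuming another reference */
	if (bio->end_io)
		bio->end_io(bio, error);
}

static void original_end_io(struct toy_bio *bio, int error)
{
	printf("bio done, error=%d, remaining=%d\n", error, bio->remaining);
}

int main(void)
{
	struct toy_bio bio = { .remaining = 1 };

	toy_endio(&bio, 0);		/* first completion: 1 -> 0 */
	bio.end_io = original_end_io;	/* restore end_io, as bio_integrity_endio() does */
	toy_endio_nodec(&bio, -5);	/* re-raise; a plain toy_endio() would hit -1 */
	return 0;
}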
diff --git a/block/blk-core.c b/block/blk-core.c
index 817446175489..6946a4275e6f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1248,7 +1248,6 @@ void blk_rq_set_block_pc(struct request *rq)
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
 	memset(rq->__cmd, 0, sizeof(rq->__cmd));
-	rq->cmd = rq->__cmd;
 }
 EXPORT_SYMBOL(blk_rq_set_block_pc);
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 54535831f1e1..77881798f793 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -10,10 +10,11 @@
 #include "blk.h"
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-					     struct bio *bio)
+					     struct bio *bio,
+					     bool no_sg_merge)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, high, highprv = 1, no_sg_merge;
+	int cluster, high, highprv = 1;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -35,7 +36,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
-	no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 	high = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
@@ -88,18 +88,23 @@ new_segment:
 
 void blk_recalc_rq_segments(struct request *rq)
 {
-	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
+	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
+			&rq->q->queue_flags);
+
+	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
+			no_sg_merge);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+			bio->bi_vcnt < queue_max_segments(q))
 		bio->bi_phys_segments = bio->bi_vcnt;
 	else {
 		struct bio *nxt = bio->bi_next;
 
 		bio->bi_next = NULL;
-		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
 		bio->bi_next = nxt;
 	}
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 940aa8a34b70..067e600002d3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -112,18 +112,22 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
  */
 void blk_mq_freeze_queue(struct request_queue *q)
 {
+	bool freeze;
+
 	spin_lock_irq(q->queue_lock);
-	q->mq_freeze_depth++;
+	freeze = !q->mq_freeze_depth++;
 	spin_unlock_irq(q->queue_lock);
 
-	percpu_ref_kill(&q->mq_usage_counter);
-	blk_mq_run_queues(q, false);
+	if (freeze) {
+		percpu_ref_kill(&q->mq_usage_counter);
+		blk_mq_run_queues(q, false);
+	}
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-	bool wake = false;
+	bool wake;
 
 	spin_lock_irq(q->queue_lock);
 	wake = !--q->mq_freeze_depth;
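The freeze path becomes nestable: only the 0 -> 1 transition of
mq_freeze_depth kills the percpu reference and runs the queues, and only the
1 -> 0 transition on the unfreeze side wakes waiters. A minimal userspace
sketch of that counting rule, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static int freeze_depth;

static void freeze(void)
{
	bool first = !freeze_depth++;

	if (first)
		printf("kill percpu ref, run queues\n");
	printf("wait for ref to reach zero\n");	/* every caller waits */
}

static void unfreeze(void)
{
	bool last = !--freeze_depth;

	if (last)
		printf("reinit ref, wake mq_freeze_wq waiters\n");
}

int main(void)
{
	freeze();	/* starts the freeze */
	freeze();	/* nested: must not kill the ref a second time */
	unfreeze();
	unfreeze();	/* only this call wakes waiters */
	return 0;
}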
@@ -172,6 +176,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	/* tag was already set */
 	rq->errors = 0;
 
+	rq->cmd = rq->__cmd;
+
 	rq->extra_len = 0;
 	rq->sense_len = 0;
 	rq->resid_len = 0;
@@ -1072,13 +1078,17 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 	blk_account_io_start(rq, 1);
 }
 
+static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
+{
+	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+		!blk_queue_nomerges(hctx->queue);
+}
+
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
 					 struct blk_mq_ctx *ctx,
 					 struct request *rq, struct bio *bio)
 {
-	struct request_queue *q = hctx->queue;
-
-	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
+	if (!hctx_allow_merges(hctx)) {
 		blk_mq_bio_to_request(rq, bio);
 		spin_lock(&ctx->lock);
 insert_rq:
@@ -1086,6 +1096,8 @@ insert_rq:
 		spin_unlock(&ctx->lock);
 		return false;
 	} else {
+		struct request_queue *q = hctx->queue;
+
 		spin_lock(&ctx->lock);
 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
 			blk_mq_bio_to_request(rq, bio);
@@ -1313,6 +1325,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 			continue;
 		set->ops->exit_request(set->driver_data, tags->rqs[i],
 				hctx_idx, i);
+		tags->rqs[i] = NULL;
 	}
 }
 
@@ -1346,8 +1359,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 
 	INIT_LIST_HEAD(&tags->page_list);
 
-	tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
-				 GFP_KERNEL, set->numa_node);
+	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+				 set->numa_node);
 	if (!tags->rqs) {
 		blk_mq_free_tags(tags);
 		return NULL;
@@ -1371,8 +1385,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		this_order--;
 
 	do {
-		page = alloc_pages_node(set->numa_node, GFP_KERNEL,
-					this_order);
+		page = alloc_pages_node(set->numa_node,
+			GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+			this_order);
 		if (page)
 			break;
 		if (!this_order--)
@@ -1396,8 +1411,10 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		if (set->ops->init_request) {
 			if (set->ops->init_request(set->driver_data,
 					tags->rqs[i], hctx_idx, i,
-					set->numa_node))
+					set->numa_node)) {
+				tags->rqs[i] = NULL;
 				goto fail;
+			}
 		}
 
 		p += rq_size;
@@ -1408,7 +1425,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 	return tags;
 
 fail:
-	pr_warn("%s: failed to allocate requests\n", __func__);
 	blk_mq_free_rq_map(set, tags, hctx_idx);
 	return NULL;
 }
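Taken together, these hunks change the allocation policy for request maps:
tags->rqs is zeroed up front (kzalloc_node) and entries are cleared again on
init_request failure, so blk_mq_free_rq_map() can safely walk a partially
built map, and both allocations use __GFP_NOWARN | __GFP_NORETRY so they fail
fast and quietly instead of triggering heavy reclaim or the OOM killer. The
retry-at-lower-depth logic added further down picks up the pieces. A toy
userspace model of that two-level fallback, with made-up numbers:

#include <stdio.h>

/* Stand-in for blk_mq_init_rq_map(): fail fast when memory is short. */
static int alloc_rq_map(unsigned int depth, unsigned int mem_available)
{
	return depth <= mem_available ? 0 : -1;
}

int main(void)
{
	unsigned int depth = 1024, min_depth = 4, mem_available = 300;

	while (depth >= min_depth) {
		if (!alloc_rq_map(depth, mem_available)) {
			printf("allocated request map at depth %u\n", depth);
			return 0;
		}
		depth >>= 1;	/* mirrors set->queue_depth >>= 1 below */
	}
	fprintf(stderr, "failed to allocate request map\n");
	return 1;
}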
@@ -1578,7 +1594,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
 		hctx->tags = set->tags[i];
 
 		/*
-		 * Allocate space for all possible cpus to avoid allocation in
+		 * Allocate space for all possible cpus to avoid allocation at
 		 * runtime
 		 */
 		hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
@@ -1666,8 +1682,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		/*
-		 * If not software queues are mapped to this hardware queue,
-		 * disable it and free the request entries
+		 * If no software queues are mapped to this hardware queue,
+		 * disable it and free the request entries.
 		 */
 		if (!hctx->nr_ctx) {
 			struct blk_mq_tag_set *set = q->tag_set;
@@ -1717,14 +1733,10 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	blk_mq_freeze_queue(q);
-
 	mutex_lock(&set->tag_list_lock);
 	list_del_init(&q->tag_set_list);
 	blk_mq_update_tag_set_depth(set);
 	mutex_unlock(&set->tag_list_lock);
-
-	blk_mq_unfreeze_queue(q);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@@ -1932,6 +1944,61 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
+static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+	int i;
+
+	for (i = 0; i < set->nr_hw_queues; i++) {
+		set->tags[i] = blk_mq_init_rq_map(set, i);
+		if (!set->tags[i])
+			goto out_unwind;
+	}
+
+	return 0;
+
+out_unwind:
+	while (--i >= 0)
+		blk_mq_free_rq_map(set, set->tags[i], i);
+
+	set->tags = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * Allocate the request maps associated with this tag_set. Note that this
+ * may reduce the depth asked for, if memory is tight. set->queue_depth
+ * will be updated to reflect the allocated depth.
+ */
+static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+	unsigned int depth;
+	int err;
+
+	depth = set->queue_depth;
+	do {
+		err = __blk_mq_alloc_rq_maps(set);
+		if (!err)
+			break;
+
+		set->queue_depth >>= 1;
+		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
+			err = -ENOMEM;
+			break;
+		}
+	} while (set->queue_depth);
+
+	if (!set->queue_depth || err) {
+		pr_err("blk-mq: failed to allocate request map\n");
+		return -ENOMEM;
+	}
+
+	if (depth != set->queue_depth)
+		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
+						depth, set->queue_depth);
+
+	return 0;
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -1940,8 +2007,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
  */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
-	int i;
-
 	if (!set->nr_hw_queues)
 		return -EINVAL;
 	if (!set->queue_depth)
@@ -1962,23 +2027,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 				 sizeof(struct blk_mq_tags *),
 				 GFP_KERNEL, set->numa_node);
 	if (!set->tags)
-		goto out;
+		return -ENOMEM;
 
-	for (i = 0; i < set->nr_hw_queues; i++) {
-		set->tags[i] = blk_mq_init_rq_map(set, i);
-		if (!set->tags[i])
-			goto out_unwind;
-	}
+	if (blk_mq_alloc_rq_maps(set))
+		goto enomem;
 
 	mutex_init(&set->tag_list_lock);
 	INIT_LIST_HEAD(&set->tag_list);
 
 	return 0;
-
-out_unwind:
-	while (--i >= 0)
-		blk_mq_free_rq_map(set, set->tags[i], i);
-out:
+enomem:
+	kfree(set->tags);
+	set->tags = NULL;
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
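For context, a hypothetical driver-side use of blk_mq_alloc_tag_set() under
the new behavior; mydrv_mq_ops and struct mydrv_cmd are assumptions, and the
requested depth is now a request rather than a promise:

static int mydrv_setup_tags(struct blk_mq_tag_set *set)
{
	memset(set, 0, sizeof(*set));
	set->ops		= &mydrv_mq_ops;	/* hypothetical ops table */
	set->nr_hw_queues	= 1;
	set->queue_depth	= 1024;			/* may be reduced under memory pressure */
	set->cmd_size		= sizeof(struct mydrv_cmd);	/* hypothetical per-request payload */
	set->numa_node		= NUMA_NO_NODE;
	set->flags		= BLK_MQ_F_SHOULD_MERGE;

	if (blk_mq_alloc_tag_set(set))
		return -ENOMEM;

	/* On success, set->queue_depth holds the depth actually allocated,
	 * which blk_mq_alloc_rq_maps() may have halved from the 1024 asked for. */
	pr_info("mydrv: using queue depth %u\n", set->queue_depth);
	return 0;
}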
@@ -1993,6 +2053,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 	}
 
 	kfree(set->tags);
+	set->tags = NULL;
 }
 EXPORT_SYMBOL(blk_mq_free_tag_set);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 4db5abf96b9e..17f5c84ce7bf 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -554,8 +554,10 @@ int blk_register_queue(struct gendisk *disk)
 	 * Initialization must be complete by now. Finish the initial
 	 * bypass from queue allocation.
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
-	blk_queue_bypass_end(q);
+	if (!blk_queue_init_done(q)) {
+		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+		blk_queue_bypass_end(q);
+	}
 
 	ret = blk_trace_init_sysfs(dev);
 	if (ret)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 900f569afcc5..6f2751d305de 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1272,15 +1272,22 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	rb_insert_color(&cfqg->rb_node, &st->rb);
 }
 
+/*
+ * This has to be called only on activation of cfqg
+ */
 static void
 cfq_update_group_weight(struct cfq_group *cfqg)
 {
-	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
 	if (cfqg->new_weight) {
 		cfqg->weight = cfqg->new_weight;
 		cfqg->new_weight = 0;
 	}
+}
+
+static void
+cfq_update_group_leaf_weight(struct cfq_group *cfqg)
+{
+	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
 	if (cfqg->new_leaf_weight) {
 		cfqg->leaf_weight = cfqg->new_leaf_weight;
@@ -1299,7 +1306,12 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	/* add to the service tree */
 	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
-	cfq_update_group_weight(cfqg);
+	/*
+	 * Update leaf_weight.  We cannot update weight at this point
+	 * because cfqg might already have been activated and is
+	 * contributing its current weight to the parent's child_weight.
+	 */
+	cfq_update_group_leaf_weight(cfqg);
 	__cfq_group_service_tree_add(st, cfqg);
 
 	/*
@@ -1323,6 +1335,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	 */
 	while ((parent = cfqg_parent(pos))) {
 		if (propagate) {
+			cfq_update_group_weight(pos);
 			propagate = !parent->nr_active++;
 			parent->children_weight += pos->weight;
 		}
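The invariant being protected: while a group is counted in its parent's
children_weight, its weight must stay the weight it was activated with, so
pending user updates are applied only at activation points. A self-contained
toy model of why applying the update mid-activation corrupts the sum (names
are illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

struct group {
	int weight;
	int new_weight;		/* pending user update */
};

static int children_weight;	/* parent's sum over active children */

static void apply_pending(struct group *g)
{
	if (g->new_weight) {
		g->weight = g->new_weight;
		g->new_weight = 0;
	}
}

static void activate(struct group *g)
{
	apply_pending(g);	/* safe: not yet counted in the sum */
	children_weight += g->weight;
}

static void deactivate(struct group *g)
{
	children_weight -= g->weight;
}

int main(void)
{
	struct group g = { .weight = 100 };

	activate(&g);
	g.new_weight = 500;	/* user writes a new weight while active */
	/* Applying it here, as the old code could, would corrupt the sum:
	 * 100 was added at activation but 500 would be subtracted later. */
	deactivate(&g);
	assert(children_weight == 0);
	printf("children_weight balanced: %d\n", children_weight);
	return 0;
}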
diff --git a/block/genhd.c b/block/genhd.c
index 791f41943132..09da5e4a8e03 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -28,10 +28,10 @@ struct kobject *block_depr;
 /* for extended dynamic devt allocation, currently only one major is used */
 #define NR_EXT_DEVT		(1 << MINORBITS)
 
-/* For extended devt allocation.  ext_devt_mutex prevents look up
+/* For extended devt allocation.  ext_devt_lock prevents look up
  * results from going away underneath its user.
  */
-static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_SPINLOCK(ext_devt_lock);
 static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 	}
 
 	/* allocate ext devt */
-	mutex_lock(&ext_devt_mutex);
-	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
-	mutex_unlock(&ext_devt_mutex);
+	idr_preload(GFP_KERNEL);
+
+	spin_lock(&ext_devt_lock);
+	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+	spin_unlock(&ext_devt_lock);
+
+	idr_preload_end();
 	if (idx < 0)
 		return idx == -ENOSPC ? -EBUSY : idx;
 
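Switching ext_devt_mutex to a spinlock means idr_alloc() can no longer sleep
under the lock, hence the idr_preload() idiom above: sleepably preallocate
IDR nodes outside the lock, then allocate atomically with GFP_NOWAIT inside
it. A generic sketch of the same idiom (my_lock/my_idr are hypothetical):

static DEFINE_SPINLOCK(my_lock);
static DEFINE_IDR(my_idr);

static int my_alloc_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; fills the per-cpu cache */

	spin_lock(&my_lock);
	/* atomic: draws from the preloaded cache instead of sleeping */
	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);

	idr_preload_end();		/* re-enables preemption */
	return id;
}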
@@ -447,9 +451,9 @@ void blk_free_devt(dev_t devt)
 		return;
 
 	if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-		mutex_lock(&ext_devt_mutex);
+		spin_lock(&ext_devt_lock);
 		idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-		mutex_unlock(&ext_devt_mutex);
+		spin_unlock(&ext_devt_lock);
 	}
 }
 
@@ -665,7 +669,6 @@ void del_gendisk(struct gendisk *disk)
 	sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
 	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
 	device_del(disk_to_dev(disk));
-	blk_free_devt(disk_to_dev(disk)->devt);
 }
 EXPORT_SYMBOL(del_gendisk);
 
@@ -690,13 +693,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
 	} else {
 		struct hd_struct *part;
 
-		mutex_lock(&ext_devt_mutex);
+		spin_lock(&ext_devt_lock);
 		part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
 		if (part && get_disk(part_to_disk(part))) {
 			*partno = part->partno;
 			disk = part_to_disk(part);
 		}
-		mutex_unlock(&ext_devt_mutex);
+		spin_unlock(&ext_devt_lock);
 	}
 
 	return disk;
@@ -1098,6 +1101,7 @@ static void disk_release(struct device *dev)
 {
 	struct gendisk *disk = dev_to_disk(dev);
 
+	blk_free_devt(dev->devt);
 	disk_release_events(disk);
 	kfree(disk->random);
 	disk_replace_part_tbl(disk, NULL);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 789cdea05893..0d9e5f97f0a8 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
 static void part_release(struct device *dev)
 {
 	struct hd_struct *p = dev_to_part(dev);
+	blk_free_devt(dev->devt);
 	free_part_stats(p);
 	free_part_info(p);
 	kfree(p);
@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno)
 	rcu_assign_pointer(ptbl->last_lookup, NULL);
 	kobject_put(part->holder_dir);
 	device_del(part_to_dev(part));
-	blk_free_devt(part_devt(part));
 
 	hd_struct_put(part);
 }
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a8b0d0208448..abb2e65b24cc 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -279,7 +279,6 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	r = blk_rq_unmap_user(bio);
 	if (!ret)
 		ret = r;
-	blk_put_request(rq);
 
 	return ret;
 }
@@ -297,8 +296,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
 	if (hdr->interface_id != 'S')
 		return -EINVAL;
-	if (hdr->cmd_len > BLK_MAX_CDB)
-		return -EINVAL;
 
 	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
 		return -EIO;
@@ -317,16 +314,23 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
 		at_head = 1;
 
+	ret = -ENOMEM;
 	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 	blk_rq_set_block_pc(rq);
 
-	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
-		blk_put_request(rq);
-		return -EFAULT;
+	if (hdr->cmd_len > BLK_MAX_CDB) {
+		rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
+		if (!rq->cmd)
+			goto out_put_request;
 	}
 
+	ret = -EFAULT;
+	if (blk_fill_sghdr_rq(q, rq, hdr, mode))
+		goto out_free_cdb;
+
+	ret = 0;
 	if (hdr->iovec_count) {
 		size_t iov_data_len;
 		struct iovec *iov = NULL;
@@ -335,7 +339,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 					    0, NULL, &iov);
 		if (ret < 0) {
 			kfree(iov);
-			goto out;
+			goto out_free_cdb;
 		}
 
 		iov_data_len = ret;
@@ -358,7 +362,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 					  GFP_KERNEL);
 
 	if (ret)
-		goto out;
+		goto out_free_cdb;
 
 	bio = rq->bio;
 	memset(sense, 0, sizeof(sense));
@@ -376,8 +380,12 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
 	hdr->duration = jiffies_to_msecs(jiffies - start_time);
 
-	return blk_complete_sghdr_rq(rq, hdr, bio);
-out:
+	ret = blk_complete_sghdr_rq(rq, hdr, bio);
+
+out_free_cdb:
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
+out_put_request:
 	blk_put_request(rq);
 	return ret;
 }
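This is the other half of the rq->cmd changes in blk-core.c and blk-mq.c
above: sg_io() can now point rq->cmd at a heap buffer for CDBs larger than
BLK_MAX_CDB, so each request must be re-initialized with rq->cmd = rq->__cmd
before reuse, and teardown frees the buffer only when the pointer no longer
aliases the inline storage. A self-contained toy of the idiom (names are
illustrative):

#include <stdlib.h>
#include <string.h>

#define INLINE_CDB 16	/* stand-in for BLK_MAX_CDB */

struct toy_request {
	unsigned char *cmd;		/* normally aliases __cmd */
	unsigned char __cmd[INLINE_CDB];
};

static int set_cdb(struct toy_request *rq, const unsigned char *cdb, size_t len)
{
	rq->cmd = rq->__cmd;	/* default, as blk_mq_rq_ctx_init() now does */
	if (len > INLINE_CDB) {
		rq->cmd = calloc(1, len);
		if (!rq->cmd)
			return -1;
	}
	memcpy(rq->cmd, cdb, len);
	return 0;
}

static void put_cdb(struct toy_request *rq)
{
	if (rq->cmd != rq->__cmd)	/* same test sg_io() uses before kfree */
		free(rq->cmd);
}

int main(void)
{
	struct toy_request rq;
	unsigned char big_cdb[20] = { 0x7f };	/* longer than the inline buffer */

	if (set_cdb(&rq, big_cdb, sizeof(big_cdb)))
		return 1;
	put_cdb(&rq);	/* frees only in the heap-allocated case */
	return 0;
}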
@@ -450,8 +458,9 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
-		goto error_free_buffer;
+		goto error;
 	}
+	blk_rq_set_block_pc(rq);
 
 	cmdlen = COMMAND_SIZE(opcode);
 
@@ -505,7 +514,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
 	rq->sense_len = 0;
-	blk_rq_set_block_pc(rq);
 
 	blk_execute_rq(q, disk, rq, 0);
 
@@ -524,9 +532,9 @@ out:
 	}
 
 error:
-	blk_put_request(rq);
-error_free_buffer:
 	kfree(buffer);
+	if (rq)
+		blk_put_request(rq);
 	return err;
 }
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);