author		Linus Torvalds <torvalds@linux-foundation.org>	2014-09-13 12:39:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-09-13 12:39:55 -0400
commit		645cc09381a14892a19f972cf36b90e2f7bdff8b (patch)
tree		4e491409a10f00e734ef873bf0a286c45038114d
parent		fc486b03cae382601b366ab460b05e1a01bf69cd (diff)
parent		a516440542afcb9647f88d12c35640baf02d07ea (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A small collection of fixes for the current rc series. This contains:

   - Two small blk-mq patches from Rob Elliott, cleaning up error cases
     at init time.

   - A fix from Ming Lei, fixing SG merging for blk-mq where
     QUEUE_FLAG_SG_NO_MERGE is the default.

   - A dev_t minor lifetime fix from Keith, fixing an issue where a
     minor might be reused before all references to it were gone.

   - A fix from Alan Stern where an unbalanced queue bypass caused SCSI
     some headaches when it does a series of add/del on devices without
     fully registering the queue.

   - A fix from me for improving the scaling of tag depth in blk-mq if
     we are short on memory"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: scale depth and rq map appropriate if low on memory
  Block: fix unbalanced bypass-disable in blk_register_queue
  block: Fix dev_t minor allocation lifetime
  blk-mq: cleanup after blk_mq_init_rq_map failures
  blk-mq: pass along blk_mq_alloc_tag_set return values
  blk-merge: fix blk_recount_segments
-rw-r--r--	block/blk-merge.c			17
-rw-r--r--	block/blk-mq.c				91
-rw-r--r--	block/blk-sysfs.c			6
-rw-r--r--	block/genhd.c				24
-rw-r--r--	block/partition-generic.c		2
-rw-r--r--	drivers/block/mtip32xx/mtip32xx.c	1
-rw-r--r--	drivers/block/null_blk.c		29
7 files changed, 123 insertions(+), 47 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 54535831f1e1..77881798f793 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -10,10 +10,11 @@
 #include "blk.h"
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-					     struct bio *bio)
+					     struct bio *bio,
+					     bool no_sg_merge)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, high, highprv = 1, no_sg_merge;
+	int cluster, high, highprv = 1;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -35,7 +36,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
-	no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 	high = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
@@ -88,18 +88,23 @@ new_segment:
 
 void blk_recalc_rq_segments(struct request *rq)
 {
-	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
+	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
+			&rq->q->queue_flags);
+
+	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
+			no_sg_merge);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+			bio->bi_vcnt < queue_max_segments(q))
 		bio->bi_phys_segments = bio->bi_vcnt;
 	else {
 		struct bio *nxt = bio->bi_next;
 
 		bio->bi_next = NULL;
-		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
 		bio->bi_next = nxt;
 	}
 
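The blk-merge hunk above fixes a segment miscount: with QUEUE_FLAG_NO_SG_MERGE set, blk_recount_segments() took bio->bi_vcnt as the physical segment count, but a bio can hold more bvecs than the queue's segment limit allows, and only the full recount can merge adjacent bvecs back under that limit. A minimal sketch of the bounded fast path, where count_full_segments() is a hypothetical stand-in for the real __blk_recalc_rq_segments() walk:

	/* Sketch, not kernel code; count_full_segments() is hypothetical. */
	static unsigned int toy_recount_segments(struct request_queue *q,
						 struct bio *bio)
	{
		/* fast path: with SG merging off, one bvec == one segment,
		 * but only while the bvec count fits the hardware limit */
		if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
		    bio->bi_vcnt < queue_max_segments(q))
			return bio->bi_vcnt;

		/* too many bvecs: the full walk may merge adjacent ones */
		return count_full_segments(q, bio);
	}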
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4aac82615a46..383ea0cb1f0a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1321,6 +1321,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 			continue;
 		set->ops->exit_request(set->driver_data, tags->rqs[i],
 					hctx_idx, i);
+		tags->rqs[i] = NULL;
 	}
 }
 
@@ -1354,8 +1355,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 
 	INIT_LIST_HEAD(&tags->page_list);
 
-	tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
-				GFP_KERNEL, set->numa_node);
+	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+				 set->numa_node);
 	if (!tags->rqs) {
 		blk_mq_free_tags(tags);
 		return NULL;
@@ -1379,8 +1381,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		this_order--;
 
 	do {
-		page = alloc_pages_node(set->numa_node, GFP_KERNEL,
-					this_order);
+		page = alloc_pages_node(set->numa_node,
+			GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+			this_order);
 		if (page)
 			break;
 		if (!this_order--)
@@ -1404,8 +1407,10 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		if (set->ops->init_request) {
 			if (set->ops->init_request(set->driver_data,
 					tags->rqs[i], hctx_idx, i,
-					set->numa_node))
+					set->numa_node)) {
+				tags->rqs[i] = NULL;
 				goto fail;
+			}
 		}
 
 		p += rq_size;
@@ -1416,7 +1421,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 	return tags;
 
 fail:
-	pr_warn("%s: failed to allocate requests\n", __func__);
 	blk_mq_free_rq_map(set, tags, hctx_idx);
 	return NULL;
 }
@@ -1936,6 +1940,61 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
+static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+	int i;
+
+	for (i = 0; i < set->nr_hw_queues; i++) {
+		set->tags[i] = blk_mq_init_rq_map(set, i);
+		if (!set->tags[i])
+			goto out_unwind;
+	}
+
+	return 0;
+
+out_unwind:
+	while (--i >= 0)
+		blk_mq_free_rq_map(set, set->tags[i], i);
+
+	set->tags = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * Allocate the request maps associated with this tag_set. Note that this
+ * may reduce the depth asked for, if memory is tight. set->queue_depth
+ * will be updated to reflect the allocated depth.
+ */
+static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+	unsigned int depth;
+	int err;
+
+	depth = set->queue_depth;
+	do {
+		err = __blk_mq_alloc_rq_maps(set);
+		if (!err)
+			break;
+
+		set->queue_depth >>= 1;
+		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
+			err = -ENOMEM;
+			break;
+		}
+	} while (set->queue_depth);
+
+	if (!set->queue_depth || err) {
+		pr_err("blk-mq: failed to allocate request map\n");
+		return -ENOMEM;
+	}
+
+	if (depth != set->queue_depth)
+		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
+						depth, set->queue_depth);
+
+	return 0;
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -1944,8 +2003,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
  */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
-	int i;
-
 	if (!set->nr_hw_queues)
 		return -EINVAL;
 	if (!set->queue_depth)
@@ -1966,23 +2023,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 				 sizeof(struct blk_mq_tags *),
 				 GFP_KERNEL, set->numa_node);
 	if (!set->tags)
-		goto out;
+		return -ENOMEM;
 
-	for (i = 0; i < set->nr_hw_queues; i++) {
-		set->tags[i] = blk_mq_init_rq_map(set, i);
-		if (!set->tags[i])
-			goto out_unwind;
-	}
+	if (blk_mq_alloc_rq_maps(set))
+		goto enomem;
 
 	mutex_init(&set->tag_list_lock);
 	INIT_LIST_HEAD(&set->tag_list);
 
 	return 0;
-
-out_unwind:
-	while (--i >= 0)
-		blk_mq_free_rq_map(set, set->tags[i], i);
-out:
+enomem:
+	kfree(set->tags);
+	set->tags = NULL;
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@ -1997,6 +2049,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 	}
 
 	kfree(set->tags);
+	set->tags = NULL;
 }
 EXPORT_SYMBOL(blk_mq_free_tag_set);
 
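With the change above, blk_mq_alloc_tag_set() may halve set->queue_depth repeatedly (down to set->reserved_tags + BLK_MQ_TAG_MIN) until the request maps fit in memory, so a driver should read the depth back rather than assume the one it asked for. A hedged sketch of the calling pattern; my_mq_ops and MY_DEPTH are made-up placeholders:

	/* Hypothetical driver setup; my_mq_ops and MY_DEPTH are placeholders. */
	static struct blk_mq_tag_set tag_set;

	static int my_driver_init_tags(void)
	{
		int ret;

		memset(&tag_set, 0, sizeof(tag_set));
		tag_set.ops = &my_mq_ops;
		tag_set.nr_hw_queues = 1;
		tag_set.queue_depth = MY_DEPTH;	/* a request, not a guarantee */
		tag_set.numa_node = NUMA_NO_NODE;
		tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

		ret = blk_mq_alloc_tag_set(&tag_set);
		if (ret)
			return ret;	/* now a specific error, e.g. -ENOMEM */

		/* the core may have scaled the depth down under memory pressure */
		if (tag_set.queue_depth < MY_DEPTH)
			pr_info("my_driver: tag depth reduced to %u\n",
				tag_set.queue_depth);
		return 0;
	}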
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 4db5abf96b9e..17f5c84ce7bf 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -554,8 +554,10 @@ int blk_register_queue(struct gendisk *disk)
 	 * Initialization must be complete by now. Finish the initial
 	 * bypass from queue allocation.
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
-	blk_queue_bypass_end(q);
+	if (!blk_queue_init_done(q)) {
+		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+		blk_queue_bypass_end(q);
+	}
 
 	ret = blk_trace_init_sysfs(dev);
 	if (ret)
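The guard above makes ending the allocation-time bypass a one-shot operation: a driver that registers, unregisters, and re-registers a queue would otherwise call blk_queue_bypass_end() once per registration and underflow the bypass count. A toy model of the invariant, a sketch rather than kernel code:

	/* Toy model (sketch): the initial bypass must end exactly once. */
	struct toy_queue {
		int bypass_depth;	/* 1 at allocation time */
		bool init_done;
	};

	static void toy_register_queue(struct toy_queue *q)
	{
		if (!q->init_done) {	/* the guard this patch adds */
			q->init_done = true;
			q->bypass_depth--;
		}
		/* without the guard, a del/add cycle on the same queue
		 * decrements bypass_depth again, driving it negative */
	}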
diff --git a/block/genhd.c b/block/genhd.c
index 791f41943132..09da5e4a8e03 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -28,10 +28,10 @@ struct kobject *block_depr;
 /* for extended dynamic devt allocation, currently only one major is used */
 #define NR_EXT_DEVT		(1 << MINORBITS)
 
-/* For extended devt allocation. ext_devt_mutex prevents look up
+/* For extended devt allocation. ext_devt_lock prevents look up
  * results from going away underneath its user.
  */
-static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_SPINLOCK(ext_devt_lock);
 static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 	}
 
 	/* allocate ext devt */
-	mutex_lock(&ext_devt_mutex);
-	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
-	mutex_unlock(&ext_devt_mutex);
+	idr_preload(GFP_KERNEL);
+
+	spin_lock(&ext_devt_lock);
+	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+	spin_unlock(&ext_devt_lock);
+
+	idr_preload_end();
 	if (idx < 0)
 		return idx == -ENOSPC ? -EBUSY : idx;
 
@@ -447,9 +451,9 @@ void blk_free_devt(dev_t devt)
 		return;
 
 	if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-		mutex_lock(&ext_devt_mutex);
+		spin_lock(&ext_devt_lock);
 		idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-		mutex_unlock(&ext_devt_mutex);
+		spin_unlock(&ext_devt_lock);
 	}
 }
 
@@ -665,7 +669,6 @@ void del_gendisk(struct gendisk *disk)
 		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
 	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
 	device_del(disk_to_dev(disk));
-	blk_free_devt(disk_to_dev(disk)->devt);
 }
 EXPORT_SYMBOL(del_gendisk);
 
@@ -690,13 +693,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
 	} else {
 		struct hd_struct *part;
 
-		mutex_lock(&ext_devt_mutex);
+		spin_lock(&ext_devt_lock);
 		part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
 		if (part && get_disk(part_to_disk(part))) {
 			*partno = part->partno;
 			disk = part_to_disk(part);
 		}
-		mutex_unlock(&ext_devt_mutex);
+		spin_unlock(&ext_devt_lock);
 	}
 
 	return disk;
@@ -1098,6 +1101,7 @@ static void disk_release(struct device *dev)
 {
 	struct gendisk *disk = dev_to_disk(dev);
 
+	blk_free_devt(dev->devt);
 	disk_release_events(disk);
 	kfree(disk->random);
 	disk_replace_part_tbl(disk, NULL);
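Together with the del_gendisk() hunk, this moves dev_t recycling to the release callback: the extended minor is returned to ext_devt_idr only after the last reference to the disk drops, not at deletion time while an open file could still resolve the old number to a newly created device. A toy refcount model of the ordering; toy_free_devt() is a hypothetical stand-in for blk_free_devt():

	/* Toy model (sketch): free the minor on final put, not on delete. */
	struct toy_disk {
		int refcount;
		dev_t devt;
	};

	static void toy_put(struct toy_disk *d)
	{
		if (--d->refcount == 0) {
			toy_free_devt(d->devt);	/* nothing can look us up now */
			kfree(d);
		}
	}

	static void toy_del_disk(struct toy_disk *d)
	{
		/* deletion drops a reference but no longer frees the devt;
		 * an opener's reference keeps the minor reserved */
		toy_put(d);
	}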
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 789cdea05893..0d9e5f97f0a8 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
 static void part_release(struct device *dev)
 {
 	struct hd_struct *p = dev_to_part(dev);
+	blk_free_devt(dev->devt);
 	free_part_stats(p);
 	free_part_info(p);
 	kfree(p);
@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno)
 	rcu_assign_pointer(ptbl->last_lookup, NULL);
 	kobject_put(part->holder_dir);
 	device_del(part_to_dev(part));
-	blk_free_devt(part_devt(part));
 
 	hd_struct_put(part);
 }
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index db1e9560d8a7..5c8e7fe07745 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3918,7 +3918,6 @@ skip_create_disk:
 	if (rv) {
 		dev_err(&dd->pdev->dev,
 			"Unable to allocate request queue\n");
-		rv = -ENOMEM;
 		goto block_queue_alloc_init_error;
 	}
 
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a3b042c4d448..00d469c7f9f7 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -462,17 +462,21 @@ static int null_add_dev(void)
 	struct gendisk *disk;
 	struct nullb *nullb;
 	sector_t size;
+	int rv;
 
 	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
-	if (!nullb)
+	if (!nullb) {
+		rv = -ENOMEM;
 		goto out;
+	}
 
 	spin_lock_init(&nullb->lock);
 
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
 		submit_queues = nr_online_nodes;
 
-	if (setup_queues(nullb))
+	rv = setup_queues(nullb);
+	if (rv)
 		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
@@ -484,22 +488,29 @@ static int null_add_dev(void)
 		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 		nullb->tag_set.driver_data = nullb;
 
-		if (blk_mq_alloc_tag_set(&nullb->tag_set))
+		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+		if (rv)
 			goto out_cleanup_queues;
 
 		nullb->q = blk_mq_init_queue(&nullb->tag_set);
-		if (!nullb->q)
+		if (!nullb->q) {
+			rv = -ENOMEM;
 			goto out_cleanup_tags;
+		}
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
-		if (!nullb->q)
+		if (!nullb->q) {
+			rv = -ENOMEM;
 			goto out_cleanup_queues;
+		}
 		blk_queue_make_request(nullb->q, null_queue_bio);
 		init_driver_queues(nullb);
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
-		if (!nullb->q)
+		if (!nullb->q) {
+			rv = -ENOMEM;
 			goto out_cleanup_queues;
+		}
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
 		init_driver_queues(nullb);
@@ -509,8 +520,10 @@ static int null_add_dev(void)
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
 
 	disk = nullb->disk = alloc_disk_node(1, home_node);
-	if (!disk)
+	if (!disk) {
+		rv = -ENOMEM;
 		goto out_cleanup_blk_queue;
+	}
 
 	mutex_lock(&lock);
 	list_add_tail(&nullb->list, &nullb_list);
@@ -544,7 +557,7 @@ out_cleanup_queues:
 out_free_nullb:
 	kfree(nullb);
 out:
-	return -ENOMEM;
+	return rv;
 }
 
 static int __init null_init(void)
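The null_blk conversion above is the standard rv + goto-unwind idiom: each step stores its specific errno in rv, and the shared exit path returns it instead of a blanket -ENOMEM, so e.g. an -EINVAL from blk_mq_alloc_tag_set() reaches the caller intact. A condensed sketch of the pattern; step_a(), step_b(), and undo_a() are made-up placeholders:

	/* Sketch of the error-propagation idiom; helpers are hypothetical. */
	static int toy_add_dev(void)
	{
		int rv;

		rv = step_a();		/* returns a specific errno on failure */
		if (rv)
			goto out;

		rv = step_b();
		if (rv)
			goto out_undo_a;

		return 0;

	out_undo_a:
		undo_a();		/* unwind in reverse order */
	out:
		return rv;		/* propagate, don't overwrite */
	}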