-rw-r--r--  block/blk-core.c                16
-rw-r--r--  block/blk-mq.c                   2
-rw-r--r--  drivers/block/drbd/drbd_main.c   2
-rw-r--r--  drivers/block/null_blk_main.c    3
-rw-r--r--  drivers/block/umem.c             2
-rw-r--r--  drivers/lightnvm/core.c          2
-rw-r--r--  drivers/md/dm.c                  2
-rw-r--r--  drivers/nvdimm/pmem.c            2
-rw-r--r--  drivers/nvme/host/multipath.c    2
-rw-r--r--  include/linux/blkdev.h           3
10 files changed, 12 insertions(+), 24 deletions(-)
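
This patch removes the third argument (spinlock_t *lock) from blk_alloc_queue_node(): the queue now always uses its internal __queue_lock, and every in-tree caller simply stops passing NULL. A minimal sketch of what changes at a call site (the surrounding variable and error handling below are illustrative, not tied to any one driver in this patch):

	/* before: legacy callers could pass their own queue lock; bio/blk-mq callers passed NULL */
	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);

	/* after: only the allocation flags and NUMA node remain */
	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		return -ENOMEM;
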
diff --git a/block/blk-core.c b/block/blk-core.c
index 5c8e66a09d82..3f94c9de0252 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(blk_cleanup_queue);
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
-	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
+	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
@@ -473,17 +473,8 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
  * blk_alloc_queue_node - allocate a request queue
  * @gfp_mask: memory allocation flags
  * @node_id: NUMA node to allocate memory from
- * @lock: For legacy queues, pointer to a spinlock that will be used to e.g.
- *	serialize calls to the legacy .request_fn() callback. Ignored for
- *	blk-mq request queues.
- *
- * Note: pass the queue lock as the third argument to this function instead of
- * setting the queue lock pointer explicitly to avoid triggering a sporadic
- * crash in the blkcg code. This function namely calls blkcg_init_queue() and
- * the queue lock pointer must be set before blkcg_init_queue() is called.
  */
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
-					   spinlock_t *lock)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
 	int ret;
@@ -534,8 +525,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 #endif
 	mutex_init(&q->sysfs_lock);
 	spin_lock_init(&q->__queue_lock);
-
-	q->queue_lock = lock ? : &q->__queue_lock;
+	q->queue_lock = &q->__queue_lock;
 
 	init_waitqueue_head(&q->mq_freeze_wq);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a3f057fdd045..3b823891b3ef 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2548,7 +2548,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
 	struct request_queue *uninit_q, *q;
 
-	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
 	if (!uninit_q)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index b66c59ce6260..f973a2a845c8 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2792,7 +2792,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 
 	drbd_init_set_defaults(device);
 
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
+	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
 	if (!q)
 		goto out_no_q;
 	device->rq_queue = q;
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 63c23fcfc4df..62c9654b9ce8 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1659,8 +1659,7 @@ static int null_add_dev(struct nullb_device *dev)
 		}
 		null_init_queues(nullb);
 	} else if (dev->queue_mode == NULL_Q_BIO) {
-		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
-						NULL);
+		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
 		if (!nullb->q) {
 			rv = -ENOMEM;
 			goto out_cleanup_queues;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 8a27b5adc2b3..aa035cf8a51d 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -888,7 +888,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	card->biotail = &card->bio;
 	spin_lock_init(&card->lock);
 
-	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
+	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
 	if (!card->queue)
 		goto failed_alloc;
 
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index efb976a863d2..60ab11fcc81c 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -389,7 +389,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 		goto err_dev;
 	}
 
-	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
+	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
 	if (!tqueue) {
 		ret = -ENOMEM;
 		goto err_disk;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c510179a7f84..a733e4c920af 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1896,7 +1896,7 @@ static struct mapped_device *alloc_dev(int minor)
 	INIT_LIST_HEAD(&md->table_devices);
 	spin_lock_init(&md->uevent_lock);
 
-	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL);
+	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
 	if (!md->queue)
 		goto bad;
 	md->queue->queuedata = md;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 0e39e3d1846f..f7019294740c 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -393,7 +393,7 @@ static int pmem_attach_disk(struct device *dev,
 		return -EBUSY;
 	}
 
-	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
+	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
 	if (!q)
 		return -ENOMEM;
 
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5e3cc8c59a39..b82b0d3ca39a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -276,7 +276,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
 		return 0;
 
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
+	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
 	if (!q)
 		goto out;
 	q->queuedata = head;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 30d8e0fbd104..c4a3a660e3f0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1122,8 +1122,7 @@ extern long nr_blockdev_pages(void);
 
 bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
-					   spinlock_t *lock);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
 extern void blk_put_queue(struct request_queue *);
 extern void blk_set_queue_dying(struct request_queue *);
 