author     Keith Busch <keith.busch@intel.com>          2012-07-27 13:57:23 -0400
committer  Matthew Wilcox <matthew.r.wilcox@intel.com>  2012-07-27 13:57:23 -0400
commit     a0cadb85b8b758608ae0759151e29de7581c6731
tree       3fc3c95f4cef33866a1da81daa86906bd5fa762c
parent     8fc23e032debd682f5ba9fc524a5846c10d2c522
NVMe: Do not set IO queue depth beyond device max
Set the depth for IO queues to the device's maximum supported queue entries if the requested depth exceeds the device's capabilities.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
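The fix is a simple clamp: the NVMe CAP register advertises Maximum Queue Entries Supported (MQES) in its low 16 bits as a zero-based value, so the largest usable depth is MQES + 1, and the driver now takes the smaller of that and its own NVME_Q_DEPTH default. A minimal user-space sketch of the same computation follows; the hard-coded cap value stands in for the driver's readq() of the BAR register, and 1024 is assumed as the NVME_Q_DEPTH default of this era:

#include <stdint.h>
#include <stdio.h>

#define NVME_Q_DEPTH		1024			/* assumed driver default */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)	/* CAP bits 15:0, zero-based */

int main(void)
{
	/* Hypothetical device: MQES = 63, i.e. at most 64 entries per queue. */
	uint64_t cap = 63;

	/* MQES is zero-based, so the device maximum is MQES + 1. */
	int q_depth = (int)NVME_CAP_MQES(cap) + 1;
	if (q_depth > NVME_Q_DEPTH)
		q_depth = NVME_Q_DEPTH;

	printf("using queue depth %d\n", q_depth);	/* prints 64 */
	return 0;
}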
Diffstat (limited to 'drivers/block/nvme.c')
 drivers/block/nvme.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 11951fa11a90..af1ef39bd6b4 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -893,7 +893,8 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 							int depth, int vector)
 {
 	struct device *dmadev = &dev->pci_dev->dev;
-	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+	unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
+						sizeof(struct nvme_cmd_info));
 	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
 	if (!nvmeq)
 		return NULL;
@@ -1391,7 +1392,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	int result, cpu, i, nr_io_queues, db_bar_size;
+	int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
 
 	nr_io_queues = num_online_cpus();
 	result = set_queue_count(dev, nr_io_queues);
@@ -1437,9 +1438,10 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
 		cpu = cpumask_next(cpu, cpu_online_mask);
 	}
 
+	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
+								NVME_Q_DEPTH);
 	for (i = 0; i < nr_io_queues; i++) {
-		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
-							NVME_Q_DEPTH, i);
+		dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
 		if (IS_ERR(dev->queues[i + 1]))
 			return PTR_ERR(dev->queues[i + 1]);
 		dev->queue_count++;
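The first hunk is a companion fix: extra sizes a trailing allocation of what is evidently one bit per queue entry (the depth / 8 term) plus one struct nvme_cmd_info per entry. While depth was always the NVME_Q_DEPTH default, a multiple of 8, truncating division was harmless; now that q_depth can be any value the device reports, DIV_ROUND_UP is needed so a non-multiple-of-8 depth does not come up a byte short. A quick illustration, with DIV_ROUND_UP defined as in the kernel's <linux/kernel.h>:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* A hypothetical device reporting MQES + 1 = 100: 100 bits of
	 * bitmap need 13 bytes, but truncating division yields 12. */
	int depth = 100;

	printf("depth / 8              = %d\n", depth / 8);              /* 12 */
	printf("DIV_ROUND_UP(depth, 8) = %d\n", DIV_ROUND_UP(depth, 8)); /* 13 */
	return 0;
}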