author     Matthew Wilcox <matthew.r.wilcox@intel.com>   2011-10-20 17:00:41 -0400
committer  Matthew Wilcox <matthew.r.wilcox@intel.com>   2011-11-04 15:53:05 -0400
commit     f1938f6e1ee1583c87ec74dc406fdd8694e99ac8
tree       a1e27b0feb844ac675ba90230e8411dce7a86f71 /drivers/block/nvme.c
parent     ce38c149576fd0a3360fec3bef4012212d42e736
NVMe: Implement doorbell stride capability
The doorbell stride allows devices to spread out their doorbells instead
of packing them tightly. This feature was added as part of ECN 003.
This patch also enables support for more than 512 queues :-)
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
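
The stride arithmetic can be made concrete with a small standalone sketch (not part of the patch; the helper names are made up for illustration). CAP.DSTRD makes each doorbell register occupy (4 << DSTRD) bytes, starting at BAR offset 0x1000, so queue qid's submission-queue tail doorbell lands at 0x1000 + (2 * qid) * (4 << DSTRD) and its completion-queue head doorbell one doorbell-width later:

#include <stdio.h>

/* Hypothetical helpers: byte offsets of a queue's doorbells from BAR0. */
static unsigned long sq_tail_db_offset(unsigned int qid, unsigned int dstrd)
{
	/* Doorbells start 4 KiB into the BAR; each is (4 << DSTRD) bytes wide. */
	return 4096 + (2 * qid) * (4UL << dstrd);
}

static unsigned long cq_head_db_offset(unsigned int qid, unsigned int dstrd)
{
	return 4096 + (2 * qid + 1) * (4UL << dstrd);
}

int main(void)
{
	unsigned int dstrd;

	for (dstrd = 0; dstrd <= 2; dstrd++)
		printf("DSTRD=%u: queue 1 SQ tail @ 0x%lx, CQ head @ 0x%lx\n",
		       dstrd, sq_tail_db_offset(1, dstrd),
		       cq_head_db_offset(1, dstrd));
	return 0;
}

With DSTRD = 0 this collapses to the old tightly packed dbs[qid * 2] layout, and the default 8 KiB BAR mapping (4 KiB of registers plus 4 KiB of doorbells) covers 4096 / 8 = 512 queue pairs; a larger queue count or a nonzero stride overflows that window, which is what the db_bar_size remap in the diff below accounts for.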
Diffstat (limited to 'drivers/block/nvme.c')
 drivers/block/nvme.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index cfe5932821d8..a17f80fa3881 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -70,6 +70,7 @@ struct nvme_dev {
 	struct dma_pool *prp_small_pool;
 	int instance;
 	int queue_count;
+	int db_stride;
 	u32 ctrl_config;
 	struct msix_entry *entry;
 	struct nvme_bar __iomem *bar;
@@ -672,7 +673,7 @@ static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
 		return IRQ_NONE;
 
-	writel(head, nvmeq->q_db + 1);
+	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
 	nvmeq->cq_head = head;
 	nvmeq->cq_phase = phase;
 
@@ -889,7 +890,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	init_waitqueue_head(&nvmeq->sq_full);
 	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
 	bio_list_init(&nvmeq->sq_cong);
-	nvmeq->q_db = &dev->dbs[qid * 2];
+	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
 	nvmeq->q_depth = depth;
 	nvmeq->cq_vector = vector;
 
@@ -981,6 +982,7 @@ static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
 
 	cap = readq(&dev->bar->cap);
 	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+	dev->db_stride = NVME_CAP_STRIDE(cap);
 
 	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
 		msleep(100);
@@ -1357,7 +1359,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	int result, cpu, i, nr_io_queues;
+	int result, cpu, i, nr_io_queues, db_bar_size;
 
 	nr_io_queues = num_online_cpus();
 	result = set_queue_count(dev, nr_io_queues);
@@ -1369,6 +1371,15 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
 	/* Deregister the admin queue's interrupt */
 	free_irq(dev->entry[0].vector, dev->queues[0]);
 
+	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+	if (db_bar_size > 8192) {
+		iounmap(dev->bar);
+		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
+								db_bar_size);
+		dev->dbs = ((void __iomem *)dev->bar) + 4096;
+		dev->queues[0]->q_db = dev->dbs;
+	}
+
 	for (i = 0; i < nr_io_queues; i++)
 		dev->entry[i].entry = i;
 	for (;;) {