author    Keith Busch <keith.busch@intel.com>          2013-07-15 17:02:23 -0400
committer Matthew Wilcox <matthew.r.wilcox@intel.com>  2013-09-03 16:44:16 -0400
commit    cd63894630ab17a192bf97427d16dbec10710a6a (patch)
tree      68070a88d52687c34d3fd96327d11f4a13e42950 /drivers/block
parent    1894d8f16afe5ad54b732f0fa6c4e80bd4d40b91 (diff)
NVMe: Add pci suspend/resume driver callbacks
Used for going in and out of low power states. Resuming reuses the IO queues from the previous initialization, freeing any allocated queues that are no longer usable.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
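For orientation before reading the hunks, the new power-management path condenses to the sketch below. This is only a summary of the changes further down, not a drop-in snippet: nvme_dev_shutdown(), nvme_dev_start() and nvme_free_queues() are pre-existing helpers in nvme-core.c, and the descriptive comments are editorial.

static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	/* Quiesce the controller for the low power transition */
	nvme_dev_shutdown(ndev);
	return 0;
}

static int nvme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);
	int ret;

	/* Restart the controller; IO queues from the previous init are reused */
	ret = nvme_dev_start(ndev);
	if (ret)
		nvme_free_queues(ndev);
	return ret;
}

/* Wires both callbacks into a struct dev_pm_ops for system sleep */
static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);

static struct pci_driver nvme_driver = {
	/* ... */
	.driver = {
		.pm = &nvme_dev_pm_ops,
	},
};

Registering the callbacks through .driver.pm replaces the legacy .suspend/.resume members of struct pci_driver, as the final hunk shows.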
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/nvme-core.c  73
1 file changed, 58 insertions(+), 15 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 23bb5a70d810..8efa728f1eac 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -789,6 +789,12 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
 	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
 	int result = -EBUSY;
 
+	if (!nvmeq) {
+		put_nvmeq(NULL);
+		bio_endio(bio, -EIO);
+		return;
+	}
+
 	spin_lock_irq(&nvmeq->q_lock);
 	if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
 		result = nvme_submit_bio_queue(nvmeq, ns, bio);
@@ -1256,9 +1262,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
-	if (!nvmeq)
-		return -ENOMEM;
+	nvmeq = dev->queues[0];
+	if (!nvmeq) {
+		nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+		if (!nvmeq)
+			return -ENOMEM;
+		dev->queues[0] = nvmeq;
+	}
 
 	aqa = nvmeq->q_depth - 1;
 	aqa |= aqa << 16;
@@ -1275,21 +1285,16 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
 	result = nvme_enable_ctrl(dev, cap);
 	if (result)
-		goto free_q;
+		return result;
 
 	result = queue_request_irq(dev, nvmeq, "nvme admin");
 	if (result)
-		goto free_q;
+		return result;
 
-	dev->queues[0] = nvmeq;
 	spin_lock(&nvmeq->q_lock);
 	nvme_init_queue(nvmeq, 0);
 	spin_unlock(&nvmeq->q_lock);
 	return result;
-
- free_q:
-	nvme_free_queue(nvmeq);
-	return result;
 }
 
 struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
@@ -1797,6 +1802,21 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (result)
 		goto free_queues;
 
+	/* Free previously allocated queues that are no longer usable */
+	spin_lock(&dev_list_lock);
+	for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
+		struct nvme_queue *nvmeq = dev->queues[i];
+
+		spin_lock(&nvmeq->q_lock);
+		nvme_cancel_ios(nvmeq, false);
+		spin_unlock(&nvmeq->q_lock);
+
+		nvme_free_queue(nvmeq);
+		dev->queue_count--;
+		dev->queues[i] = NULL;
+	}
+	spin_unlock(&dev_list_lock);
+
 	cpu = cpumask_first(cpu_online_mask);
 	for (i = 0; i < nr_io_queues; i++) {
 		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
@@ -1805,7 +1825,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 
 	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
 							NVME_Q_DEPTH);
-	for (i = 0; i < nr_io_queues; i++) {
+	for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
 		dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
 		if (!dev->queues[i + 1]) {
 			result = -ENOMEM;
@@ -2191,8 +2211,30 @@ static void nvme_remove(struct pci_dev *pdev)
 #define nvme_link_reset NULL
 #define nvme_slot_reset NULL
 #define nvme_error_resume NULL
-#define nvme_suspend NULL
-#define nvme_resume NULL
+
+static int nvme_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct nvme_dev *ndev = pci_get_drvdata(pdev);
+
+	nvme_dev_shutdown(ndev);
+	return 0;
+}
+
+static int nvme_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct nvme_dev *ndev = pci_get_drvdata(pdev);
+	int ret;
+
+	ret = nvme_dev_start(ndev);
+	/* XXX: should remove gendisks if resume fails */
+	if (ret)
+		nvme_free_queues(ndev);
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
 
 static const struct pci_error_handlers nvme_err_handler = {
 	.error_detected = nvme_error_detected,
@@ -2216,8 +2258,9 @@ static struct pci_driver nvme_driver = {
 	.id_table = nvme_id_table,
 	.probe = nvme_probe,
 	.remove = nvme_remove,
-	.suspend = nvme_suspend,
-	.resume = nvme_resume,
+	.driver = {
+		.pm = &nvme_dev_pm_ops,
+	},
 	.err_handler = &nvme_err_handler,
 };
 