author		Christoph Hellwig <hch@lst.de>	2015-05-22 05:12:39 -0400
committer	Jens Axboe <axboe@fb.com>	2015-05-22 10:36:33 -0400
commit		e75ec752d725b7b612c0b2db1bca50a9e53c0879 (patch)
tree		94c1a00fa02774dc49bbe3d5ecefe42689d545f5
parent		f705f837c58ebe1ea69dfffff4dcc234e2fbc8dd (diff)
nvme: store a struct device pointer in struct nvme_dev
Most users want the generic device, so store that in struct nvme_dev
instead of the pci_dev.  This also happens to be a nice step towards
making some code reusable for non-PCI transports.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--	drivers/block/nvme-core.c	110
-rw-r--r--	drivers/block/nvme-scsi.c	 63
-rw-r--r--	include/linux/nvme.h	  2
3 files changed, 79 insertions(+), 96 deletions(-)
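The conversion follows one simple pattern: DMA and logging helpers only need the generic struct device, and the few genuinely PCI-specific paths recover the pci_dev with to_pci_dev(). The sketch below condenses that before/after access pattern for readers skimming the patch; it is illustrative only, and the struct and function names in it (nvme_dev_example, nvme_example_access_pattern) are hypothetical stand-ins rather than code from the diff.

/*
 * Illustrative sketch only -- not part of the patch.  The structure and
 * helper names below are hypothetical; the real code is in the hunks
 * that follow.
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct nvme_dev_example {
	struct device *dev;	/* was: struct pci_dev *pci_dev */
};

static void nvme_example_access_pattern(struct nvme_dev_example *dev,
					struct scatterlist *sg, int count)
{
	int nents;

	/* The generic device is enough for DMA mapping and diagnostics. */
	nents = dma_map_sg(dev->dev, sg, count, DMA_TO_DEVICE);
	if (!nents) {
		dev_warn(dev->dev, "failed to map scatterlist\n");
		return;
	}
	dma_unmap_sg(dev->dev, sg, count, DMA_TO_DEVICE);

	/* PCI-specific paths recover the pci_dev only where needed. */
	if (to_pci_dev(dev->dev)->msi_enabled)
		pci_disable_msi(to_pci_dev(dev->dev));
}

The design choice is simply that nothing PCI-specific is lost: the generic pointer still refers to the PCI device's embedded struct device, so to_pci_dev() gets the pci_dev back where the driver genuinely needs it.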
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index e81b205ffd04..870a926e1ddc 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -610,17 +610,17 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 	req->errors = 0;
 
 	if (cmd_rq->aborted)
-		dev_warn(&nvmeq->dev->pci_dev->dev,
+		dev_warn(nvmeq->dev->dev,
 			"completing aborted command with status:%04x\n",
 			status);
 
 	if (iod->nents) {
-		dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
+		dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
 			rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 		if (blk_integrity_rq(req)) {
 			if (!rq_data_dir(req))
 				nvme_dif_remap(req, nvme_dif_complete);
-			dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
+			dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1,
 				rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 		}
 	}
@@ -861,7 +861,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (blk_rq_bytes(req) !=
 		nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
-		dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg,
+		dma_unmap_sg(nvmeq->dev->dev, iod->sg,
 					iod->nents, dma_dir);
 		goto retry_cmd;
 	}
@@ -1192,8 +1192,7 @@ static void nvme_abort_req(struct request *req)
 		if (work_busy(&dev->reset_work))
 			goto out;
 		list_del_init(&dev->node);
-		dev_warn(&dev->pci_dev->dev,
-			"I/O %d QID %d timeout, reset controller\n",
+		dev_warn(dev->dev, "I/O %d QID %d timeout, reset controller\n",
 			req->tag, nvmeq->qid);
 		dev->reset_workfn = nvme_reset_failed_dev;
 		queue_work(nvme_workq, &dev->reset_work);
@@ -1362,22 +1361,21 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 							int depth)
 {
-	struct device *dmadev = &dev->pci_dev->dev;
 	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
 	if (!nvmeq)
 		return NULL;
 
-	nvmeq->cqes = dma_zalloc_coherent(dmadev, CQ_SIZE(depth),
+	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
 					  &nvmeq->cq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->cqes)
 		goto free_nvmeq;
 
-	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
 					&nvmeq->sq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->sq_cmds)
 		goto free_cqdma;
 
-	nvmeq->q_dmadev = dmadev;
+	nvmeq->q_dmadev = dev->dev;
 	nvmeq->dev = dev;
 	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
 			dev->instance, qid);
@@ -1393,7 +1391,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	return nvmeq;
 
  free_cqdma:
-	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
+	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
 							nvmeq->cq_dma_addr);
  free_nvmeq:
 	kfree(nvmeq);
@@ -1465,7 +1463,7 @@ static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
 		if (fatal_signal_pending(current))
 			return -EINTR;
 		if (time_after(jiffies, timeout)) {
-			dev_err(&dev->pci_dev->dev,
+			dev_err(dev->dev,
 				"Device not ready; aborting %s\n", enabled ?
 						"initialisation" : "reset");
 			return -ENODEV;
@@ -1515,7 +1513,7 @@ static int nvme_shutdown_ctrl(struct nvme_dev *dev)
 		if (fatal_signal_pending(current))
 			return -EINTR;
 		if (time_after(jiffies, timeout)) {
-			dev_err(&dev->pci_dev->dev,
+			dev_err(dev->dev,
 				"Device shutdown incomplete; abort shutdown\n");
 			return -ENODEV;
 		}
@@ -1558,7 +1556,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 	dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
 	dev->admin_tagset.reserved_tags = 1;
 	dev->admin_tagset.timeout = ADMIN_TIMEOUT;
-	dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
+	dev->admin_tagset.numa_node = dev_to_node(dev->dev);
 	dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
 	dev->admin_tagset.driver_data = dev;
 
@@ -1591,14 +1589,14 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
 	if (page_shift < dev_page_min) {
-		dev_err(&dev->pci_dev->dev,
+		dev_err(dev->dev,
 			"Minimum device page size (%u) too large for "
 			"host (%u)\n", 1 << dev_page_min,
 			1 << page_shift);
 		return -ENODEV;
 	}
 	if (page_shift > dev_page_max) {
-		dev_info(&dev->pci_dev->dev,
+		dev_info(dev->dev,
 			"Device maximum page size (%u) smaller than "
 			"host (%u); enabling work-around\n",
 			1 << dev_page_max, 1 << page_shift);
@@ -1689,7 +1687,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 	sg_mark_end(&sg[i - 1]);
 	iod->nents = count;
 
-	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+	nents = dma_map_sg(dev->dev, sg, count,
 				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	if (!nents)
 		goto free_iod;
@@ -1711,7 +1709,7 @@ void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 {
 	int i;
 
-	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+	dma_unmap_sg(dev->dev, iod->sg, iod->nents,
 				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
 	for (i = 0; i < iod->nents; i++)
@@ -1762,7 +1760,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 		goto unmap;
 	}
 	if (meta_len) {
-		meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+		meta = dma_alloc_coherent(dev->dev, meta_len,
 						&meta_dma, GFP_KERNEL);
 		if (!meta) {
 			status = -ENOMEM;
@@ -1801,7 +1799,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 						meta_len))
 				status = -EFAULT;
 		}
-		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
+		dma_free_coherent(dev->dev, meta_len, meta, meta_dma);
 	}
 	return status;
 }
@@ -1961,15 +1959,13 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	u16 old_ms;
 	unsigned short bs;
 
-	id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
-								GFP_KERNEL);
+	id = dma_alloc_coherent(dev->dev, 4096, &dma_addr, GFP_KERNEL);
 	if (!id) {
-		dev_warn(&dev->pci_dev->dev, "%s: Memory alocation failure\n",
-								__func__);
+		dev_warn(dev->dev, "%s: Memory alocation failure\n", __func__);
 		return 0;
 	}
 	if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
-		dev_warn(&dev->pci_dev->dev,
+		dev_warn(dev->dev,
 			"identify failed ns:%d, setting capacity to 0\n",
 			ns->ns_id);
 		memset(id, 0, sizeof(*id));
@@ -2014,7 +2010,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	if (dev->oncs & NVME_CTRL_ONCS_DSM)
 		nvme_config_discard(ns);
 
-	dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
+	dma_free_coherent(dev->dev, 4096, id, dma_addr);
 	return 0;
 }
 
@@ -2041,7 +2037,7 @@ static int nvme_kthread(void *data)
 			if (work_busy(&dev->reset_work))
 				continue;
 			list_del_init(&dev->node);
-			dev_warn(&dev->pci_dev->dev,
+			dev_warn(dev->dev,
 				"Failed status: %x, reset controller\n",
 				readl(&dev->bar->csts));
 			dev->reset_workfn = nvme_reset_failed_dev;
@@ -2073,7 +2069,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 {
 	struct nvme_ns *ns;
 	struct gendisk *disk;
-	int node = dev_to_node(&dev->pci_dev->dev);
+	int node = dev_to_node(dev->dev);
 
 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
 	if (!ns)
@@ -2156,8 +2152,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 	if (status < 0)
 		return status;
 	if (status > 0) {
-		dev_err(&dev->pci_dev->dev, "Could not set queue count (%d)\n",
-								status);
+		dev_err(dev->dev, "Could not set queue count (%d)\n", status);
 		return 0;
 	}
 	return min(result & 0xffff, result >> 16) + 1;
@@ -2171,7 +2166,7 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
 	struct nvme_queue *adminq = dev->queues[0];
-	struct pci_dev *pdev = dev->pci_dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, i, vecs, nr_io_queues, size;
 
 	nr_io_queues = num_possible_cpus();
@@ -2251,7 +2246,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
  */
 static int nvme_dev_add(struct nvme_dev *dev)
 {
-	struct pci_dev *pdev = dev->pci_dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int res;
 	unsigned nn, i;
 	struct nvme_id_ctrl *ctrl;
@@ -2259,14 +2254,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	dma_addr_t dma_addr;
 	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-	mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
+	mem = dma_alloc_coherent(dev->dev, 4096, &dma_addr, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
 	res = nvme_identify(dev, 0, 1, dma_addr);
 	if (res) {
-		dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
-		dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+		dev_err(dev->dev, "Identify Controller failed (%d)\n", res);
+		dma_free_coherent(dev->dev, 4096, mem, dma_addr);
 		return -EIO;
 	}
 
@@ -2292,12 +2287,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
 		} else
 			dev->max_hw_sectors = max_hw_sectors;
 	}
-	dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+	dma_free_coherent(dev->dev, 4096, mem, dma_addr);
 
 	dev->tagset.ops = &nvme_mq_ops;
 	dev->tagset.nr_hw_queues = dev->online_queues - 1;
 	dev->tagset.timeout = NVME_IO_TIMEOUT;
-	dev->tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
+	dev->tagset.numa_node = dev_to_node(dev->dev);
 	dev->tagset.queue_depth =
 				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
 	dev->tagset.cmd_size = nvme_cmd_size(dev);
@@ -2317,7 +2312,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 {
 	u64 cap;
 	int bars, result = -ENOMEM;
-	struct pci_dev *pdev = dev->pci_dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (pci_enable_device_mem(pdev))
 		return result;
@@ -2331,8 +2326,8 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	if (pci_request_selected_regions(pdev, bars, "nvme"))
 		goto disable_pci;
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
-	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
+	    dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
 		goto disable;
 
 	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -2373,19 +2368,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
 
 static void nvme_dev_unmap(struct nvme_dev *dev)
 {
-	if (dev->pci_dev->msi_enabled)
-		pci_disable_msi(dev->pci_dev);
-	else if (dev->pci_dev->msix_enabled)
-		pci_disable_msix(dev->pci_dev);
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
+	else if (pdev->msix_enabled)
+		pci_disable_msix(pdev);
 
 	if (dev->bar) {
 		iounmap(dev->bar);
 		dev->bar = NULL;
-		pci_release_regions(dev->pci_dev);
+		pci_release_regions(pdev);
 	}
 
-	if (pci_is_enabled(dev->pci_dev))
-		pci_disable_device(dev->pci_dev);
+	if (pci_is_enabled(pdev))
+		pci_disable_device(pdev);
 }
 
 struct nvme_delq_ctx {
@@ -2504,7 +2501,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
 					&worker, "nvme%d", dev->instance);
 
 	if (IS_ERR(kworker_task)) {
-		dev_err(&dev->pci_dev->dev,
+		dev_err(dev->dev,
 			"Failed to create queue del task\n");
 		for (i = dev->queue_count - 1; i > 0; i--)
 			nvme_disable_queue(dev, i);
@@ -2622,14 +2619,13 @@ static void nvme_dev_remove(struct nvme_dev *dev)
 
 static int nvme_setup_prp_pools(struct nvme_dev *dev)
 {
-	struct device *dmadev = &dev->pci_dev->dev;
-	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
 						PAGE_SIZE, PAGE_SIZE, 0);
 	if (!dev->prp_page_pool)
 		return -ENOMEM;
 
 	/* Optimisation for I/Os between 4k and 128k */
-	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
 						256, 256, 0);
 	if (!dev->prp_small_pool) {
 		dma_pool_destroy(dev->prp_page_pool);
@@ -2693,7 +2689,7 @@ static void nvme_free_dev(struct kref *kref)
 {
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
-	pci_dev_put(dev->pci_dev);
+	put_device(dev->dev);
 	put_device(dev->device);
 	nvme_free_namespaces(dev);
 	nvme_release_instance(dev);
@@ -2837,7 +2833,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
 static int nvme_remove_dead_ctrl(void *arg)
 {
 	struct nvme_dev *dev = (struct nvme_dev *)arg;
-	struct pci_dev *pdev = dev->pci_dev;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (pci_get_drvdata(pdev))
 		pci_stop_and_remove_bus_device_locked(pdev);
@@ -2876,11 +2872,11 @@ static void nvme_dev_reset(struct nvme_dev *dev)
 {
 	nvme_dev_shutdown(dev);
 	if (nvme_dev_resume(dev)) {
-		dev_warn(&dev->pci_dev->dev, "Device failed to resume\n");
+		dev_warn(dev->dev, "Device failed to resume\n");
 		kref_get(&dev->kref);
 		if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
 							dev->instance))) {
-			dev_err(&dev->pci_dev->dev,
+			dev_err(dev->dev,
 				"Failed to start controller remove task\n");
 			kref_put(&dev->kref, nvme_free_dev);
 		}
@@ -2924,7 +2920,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_LIST_HEAD(&dev->namespaces);
 	dev->reset_workfn = nvme_reset_failed_dev;
 	INIT_WORK(&dev->reset_work, nvme_reset_workfn);
-	dev->pci_dev = pci_dev_get(pdev);
+	dev->dev = get_device(&pdev->dev);
 	pci_set_drvdata(pdev, dev);
 	result = nvme_set_instance(dev);
 	if (result)
@@ -2954,7 +2950,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  release:
 	nvme_release_instance(dev);
  put_pci:
-	pci_dev_put(dev->pci_dev);
+	put_device(dev->dev);
  free:
 	kfree(dev->queues);
 	kfree(dev->entry);
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index ba1809fbd49e..f1c90f273132 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -684,7 +684,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
 	u8 cmdque = 0x01 << 1;
 	u8 fw_offset = sizeof(dev->firmware_rev);
 
-	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
 					&dma_addr, GFP_KERNEL);
 	if (mem == NULL) {
 		res = -ENOMEM;
@@ -728,8 +728,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
 	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
-	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-			  dma_addr);
+	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out_dma:
 	return res;
 }
@@ -787,7 +786,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	int xfer_len;
 	__be32 tmp_id = cpu_to_be32(ns->ns_id);
 
-	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
 					&dma_addr, GFP_KERNEL);
 	if (mem == NULL) {
 		res = -ENOMEM;
@@ -842,7 +841,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	inq_response[6] = 0x00;    /* Rsvd */
 	inq_response[7] = 0x44;    /* Designator Length */
 
-	sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
+	sprintf(&inq_response[8], "%04x", to_pci_dev(dev->dev)->vendor);
 	memcpy(&inq_response[12], dev->model, sizeof(dev->model));
 	sprintf(&inq_response[52], "%04x", tmp_id);
 	memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
@@ -851,8 +850,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
-	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-			  dma_addr);
+	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out_dma:
 	return res;
 }
@@ -883,7 +881,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		goto out_mem;
 	}
 
-	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
 					&dma_addr, GFP_KERNEL);
 	if (mem == NULL) {
 		res = -ENOMEM;
@@ -933,8 +931,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
-	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-			  dma_addr);
+	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out_dma:
 	kfree(inq_response);
  out_mem:
@@ -1038,8 +1035,7 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
 		goto out_mem;
 	}
 
-	mem = dma_alloc_coherent(&dev->pci_dev->dev,
-				sizeof(struct nvme_smart_log),
+	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
 				&dma_addr, GFP_KERNEL);
 	if (mem == NULL) {
 		res = -ENOMEM;
@@ -1077,7 +1073,7 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
 	xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
 	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
 
-	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+	dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
 			  mem, dma_addr);
  out_dma:
 	kfree(log_response);
@@ -1106,8 +1102,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		goto out_mem;
 	}
 
-	mem = dma_alloc_coherent(&dev->pci_dev->dev,
-				sizeof(struct nvme_smart_log),
+	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
 				&dma_addr, GFP_KERNEL);
 	if (mem == NULL) {
 		res = -ENOMEM;
@@ -1158,7 +1153,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
 	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
 
-	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+	dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
 			  mem, dma_addr);
  out_dma:
 	kfree(log_response);
@@ -1209,7 +1204,7 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
 		return SNTI_INTERNAL_ERROR;
 
-	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
 					&dma_addr, GFP_KERNEL);
 	if (mem == NULL) {
 		res = -ENOMEM;
@@ -1246,8 +1241,7 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	}
 
  out_dma:
-	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-			  dma_addr);
+	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out:
 	return res;
 }
@@ -1494,8 +1488,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	unsigned ps_desired = 0;
 
 	/* NVMe Controller Identify */
-	mem = dma_alloc_coherent(&dev->pci_dev->dev,
-				sizeof(struct nvme_id_ctrl),
+	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl),
 				&dma_addr, GFP_KERNEL);
 	if (mem == NULL) {
 		res = -ENOMEM;
@@ -1556,8 +1549,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	if (nvme_sc)
 		res = nvme_sc;
  out_dma:
-	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
-			  dma_addr);
+	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr);
  out:
 	return res;
 }
@@ -1820,7 +1812,7 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
 	 */
 
 	if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
-		mem = dma_alloc_coherent(&dev->pci_dev->dev,
+		mem = dma_alloc_coherent(dev->dev,
 			sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
 		if (mem == NULL) {
 			res = -ENOMEM;
@@ -1845,7 +1837,7 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
 					(1 << (id_ns->lbaf[flbas].ds));
 		}
  out_dma:
-		dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+		dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns),
 				  mem, dma_addr);
 	}
  out:
@@ -1928,7 +1920,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	struct nvme_command c;
 
 	/* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
-	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
 					&dma_addr, GFP_KERNEL);
 	if (mem == NULL) {
 		res = -ENOMEM;
@@ -1979,8 +1971,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		res = nvme_sc;
 
  out_dma:
-	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-			  dma_addr);
+	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out:
 	return res;
 }
@@ -2485,7 +2476,7 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		resp_size = READ_CAP_16_RESP_SIZE;
 	}
 
-	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
 					&dma_addr, GFP_KERNEL);
 	if (mem == NULL) {
 		res = -ENOMEM;
@@ -2514,8 +2505,7 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 
 	kfree(response);
  out_dma:
-	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-			  dma_addr);
+	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out:
 	return res;
 }
@@ -2548,8 +2538,7 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		goto out;
 	} else {
 		/* NVMe Controller Identify */
-		mem = dma_alloc_coherent(&dev->pci_dev->dev,
-					sizeof(struct nvme_id_ctrl),
+		mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl),
 					&dma_addr, GFP_KERNEL);
 		if (mem == NULL) {
 			res = -ENOMEM;
@@ -2600,8 +2589,7 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 
 	kfree(response);
  out_dma:
-	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
-			  dma_addr);
+	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr);
  out:
 	return res;
 }
@@ -2913,7 +2901,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		goto out;
 	}
 
-	range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+	range = dma_alloc_coherent(dev->dev, ndesc * sizeof(*range),
 					&dma_addr, GFP_KERNEL);
 	if (!range)
 		goto out;
@@ -2934,8 +2922,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 
-	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
-			  range, dma_addr);
+	dma_free_coherent(dev->dev, ndesc * sizeof(*range), range, dma_addr);
  out:
 	kfree(plist);
 	return res;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 61488b2ae291..de0e49a716b8 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -74,7 +74,7 @@ struct nvme_dev {
 	struct blk_mq_tag_set tagset;
 	struct blk_mq_tag_set admin_tagset;
 	u32 __iomem *dbs;
-	struct pci_dev *pci_dev;
+	struct device *dev;
 	struct dma_pool *prp_page_pool;
 	struct dma_pool *prp_small_pool;
 	int instance;