about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2018-10-10 14:16:41 -0400
committerMartin K. Petersen <martin.petersen@oracle.com>2018-10-17 21:58:53 -0400
commitcecfed31fda849767799e5521064796a21c5164c (patch)
treef35f95754d04ff3f8ae0081c53beb8d909834c71
parentec44a6762fc57c82acdf2c12ec9b542d9f308300 (diff)
scsi: snic: switch to generic DMA API
Switch from the legacy PCI DMA API to the generic DMA API.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--drivers/scsi/snic/snic_disc.c7
-rw-r--r--drivers/scsi/snic/snic_io.c25
-rw-r--r--drivers/scsi/snic/snic_main.c24
-rw-r--r--drivers/scsi/snic/snic_scsi.c11
-rw-r--r--drivers/scsi/snic/vnic_dev.c29
5 files changed, 38 insertions, 58 deletions
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index b106596cc0cf..e9ccfb97773f 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -111,8 +111,8 @@ snic_queue_report_tgt_req(struct snic *snic)
111 111
112 SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0); 112 SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
113 113
114 pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE); 114 pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
115 if (pci_dma_mapping_error(snic->pdev, pa)) { 115 if (dma_mapping_error(&snic->pdev->dev, pa)) {
116 SNIC_HOST_ERR(snic->shost, 116 SNIC_HOST_ERR(snic->shost,
117 "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n", 117 "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
118 buf); 118 buf);
@@ -138,7 +138,8 @@ snic_queue_report_tgt_req(struct snic *snic)
138 138
139 ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len); 139 ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
140 if (ret) { 140 if (ret) {
141 pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE); 141 dma_unmap_single(&snic->pdev->dev, pa, buf_len,
142 DMA_FROM_DEVICE);
142 kfree(buf); 143 kfree(buf);
143 rqi->sge_va = 0; 144 rqi->sge_va = 0;
144 snic_release_untagged_req(snic, rqi); 145 snic_release_untagged_req(snic, rqi);
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 8e69548395b9..159ee94d2a55 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -102,7 +102,8 @@ snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
102 struct snic_req_info *rqi = NULL; 102 struct snic_req_info *rqi = NULL;
103 unsigned long flags; 103 unsigned long flags;
104 104
105 pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE); 105 dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
106 DMA_TO_DEVICE);
106 107
107 rqi = req_to_rqi(req); 108 rqi = req_to_rqi(req);
108 spin_lock_irqsave(&snic->spl_cmd_lock, flags); 109 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
@@ -172,8 +173,8 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
172 snic_print_desc(__func__, os_buf, len); 173 snic_print_desc(__func__, os_buf, len);
173 174
174 /* Map request buffer */ 175 /* Map request buffer */
175 pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE); 176 pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
176 if (pci_dma_mapping_error(snic->pdev, pa)) { 177 if (dma_mapping_error(&snic->pdev->dev, pa)) {
177 SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n"); 178 SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
178 179
179 return -ENOMEM; 180 return -ENOMEM;
@@ -186,7 +187,7 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
186 spin_lock_irqsave(&snic->wq_lock[q_num], flags); 187 spin_lock_irqsave(&snic->wq_lock[q_num], flags);
187 desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type); 188 desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
188 if (desc_avail <= 0) { 189 if (desc_avail <= 0) {
189 pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE); 190 dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
190 req->req_pa = 0; 191 req->req_pa = 0;
191 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); 192 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
192 atomic64_inc(&snic->s_stats.misc.wq_alloc_fail); 193 atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
@@ -350,29 +351,29 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
350 351
351 if (rqi->abort_req) { 352 if (rqi->abort_req) {
352 if (rqi->abort_req->req_pa) 353 if (rqi->abort_req->req_pa)
353 pci_unmap_single(snic->pdev, 354 dma_unmap_single(&snic->pdev->dev,
354 rqi->abort_req->req_pa, 355 rqi->abort_req->req_pa,
355 sizeof(struct snic_host_req), 356 sizeof(struct snic_host_req),
356 PCI_DMA_TODEVICE); 357 DMA_TO_DEVICE);
357 358
358 mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 359 mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
359 } 360 }
360 361
361 if (rqi->dr_req) { 362 if (rqi->dr_req) {
362 if (rqi->dr_req->req_pa) 363 if (rqi->dr_req->req_pa)
363 pci_unmap_single(snic->pdev, 364 dma_unmap_single(&snic->pdev->dev,
364 rqi->dr_req->req_pa, 365 rqi->dr_req->req_pa,
365 sizeof(struct snic_host_req), 366 sizeof(struct snic_host_req),
366 PCI_DMA_TODEVICE); 367 DMA_TO_DEVICE);
367 368
368 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 369 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
369 } 370 }
370 371
371 if (rqi->req->req_pa) 372 if (rqi->req->req_pa)
372 pci_unmap_single(snic->pdev, 373 dma_unmap_single(&snic->pdev->dev,
373 rqi->req->req_pa, 374 rqi->req->req_pa,
374 rqi->req_len, 375 rqi->req_len,
375 PCI_DMA_TODEVICE); 376 DMA_TO_DEVICE);
376 377
377 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]); 378 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
378} 379}
@@ -384,10 +385,10 @@ snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
384 385
385 sgd = req_to_sgl(rqi_to_req(rqi)); 386 sgd = req_to_sgl(rqi_to_req(rqi));
386 SNIC_BUG_ON(sgd[0].addr == 0); 387 SNIC_BUG_ON(sgd[0].addr == 0);
387 pci_unmap_single(snic->pdev, 388 dma_unmap_single(&snic->pdev->dev,
388 le64_to_cpu(sgd[0].addr), 389 le64_to_cpu(sgd[0].addr),
389 le32_to_cpu(sgd[0].len), 390 le32_to_cpu(sgd[0].len),
390 PCI_DMA_FROMDEVICE); 391 DMA_FROM_DEVICE);
391} 392}
392 393
393/* 394/*
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 7cf70aaec0ba..5295277d6325 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -435,37 +435,17 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
435 * limitation for the device. Try 43-bit first, and 435 * limitation for the device. Try 43-bit first, and
436 * fail to 32-bit. 436 * fail to 32-bit.
437 */ 437 */
438 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43)); 438 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
439 if (ret) { 439 if (ret) {
440 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 440 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
441 if (ret) { 441 if (ret) {
442 SNIC_HOST_ERR(shost, 442 SNIC_HOST_ERR(shost,
443 "No Usable DMA Configuration, aborting %d\n", 443 "No Usable DMA Configuration, aborting %d\n",
444 ret); 444 ret);
445
446 goto err_rel_regions;
447 }
448
449 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
450 if (ret) {
451 SNIC_HOST_ERR(shost,
452 "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
453 ret);
454
455 goto err_rel_regions;
456 }
457 } else {
458 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
459 if (ret) {
460 SNIC_HOST_ERR(shost,
461 "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
462 ret);
463
464 goto err_rel_regions; 445 goto err_rel_regions;
465 } 446 }
466 } 447 }
467 448
468
469 /* Map vNIC resources from BAR0 */ 449 /* Map vNIC resources from BAR0 */
470 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 450 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
471 SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n"); 451 SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index 42e485139fc9..b3650c989ed4 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -146,10 +146,10 @@ snic_release_req_buf(struct snic *snic,
146 CMD_FLAGS(sc)); 146 CMD_FLAGS(sc));
147 147
148 if (req->u.icmnd.sense_addr) 148 if (req->u.icmnd.sense_addr)
149 pci_unmap_single(snic->pdev, 149 dma_unmap_single(&snic->pdev->dev,
150 le64_to_cpu(req->u.icmnd.sense_addr), 150 le64_to_cpu(req->u.icmnd.sense_addr),
151 SCSI_SENSE_BUFFERSIZE, 151 SCSI_SENSE_BUFFERSIZE,
152 PCI_DMA_FROMDEVICE); 152 DMA_FROM_DEVICE);
153 153
154 scsi_dma_unmap(sc); 154 scsi_dma_unmap(sc);
155 155
@@ -185,12 +185,11 @@ snic_queue_icmnd_req(struct snic *snic,
185 } 185 }
186 } 186 }
187 187
188 pa = pci_map_single(snic->pdev, 188 pa = dma_map_single(&snic->pdev->dev,
189 sc->sense_buffer, 189 sc->sense_buffer,
190 SCSI_SENSE_BUFFERSIZE, 190 SCSI_SENSE_BUFFERSIZE,
191 PCI_DMA_FROMDEVICE); 191 DMA_FROM_DEVICE);
192 192 if (dma_mapping_error(&snic->pdev->dev, pa)) {
193 if (pci_dma_mapping_error(snic->pdev, pa)) {
194 SNIC_HOST_ERR(snic->shost, 193 SNIC_HOST_ERR(snic->shost,
195 "QIcmnd:PCI Map Failed for sns buf %p tag %x\n", 194 "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
196 sc->sense_buffer, snic_cmd_tag(sc)); 195 sc->sense_buffer, snic_cmd_tag(sc));
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index dad5fc66effb..05e374f80946 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -225,10 +225,9 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
225{ 225{
226 svnic_dev_desc_ring_size(ring, desc_count, desc_size); 226 svnic_dev_desc_ring_size(ring, desc_count, desc_size);
227 227
228 ring->descs_unaligned = pci_alloc_consistent(vdev->pdev, 228 ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
229 ring->size_unaligned, 229 ring->size_unaligned, &ring->base_addr_unaligned,
230 &ring->base_addr_unaligned); 230 GFP_KERNEL);
231
232 if (!ring->descs_unaligned) { 231 if (!ring->descs_unaligned) {
233 pr_err("Failed to allocate ring (size=%d), aborting\n", 232 pr_err("Failed to allocate ring (size=%d), aborting\n",
234 (int)ring->size); 233 (int)ring->size);
@@ -251,7 +250,7 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
251void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) 250void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
252{ 251{
253 if (ring->descs) { 252 if (ring->descs) {
254 pci_free_consistent(vdev->pdev, 253 dma_free_coherent(&vdev->pdev->dev,
255 ring->size_unaligned, 254 ring->size_unaligned,
256 ring->descs_unaligned, 255 ring->descs_unaligned,
257 ring->base_addr_unaligned); 256 ring->base_addr_unaligned);
@@ -470,9 +469,9 @@ int svnic_dev_fw_info(struct vnic_dev *vdev,
470 int err = 0; 469 int err = 0;
471 470
472 if (!vdev->fw_info) { 471 if (!vdev->fw_info) {
473 vdev->fw_info = pci_alloc_consistent(vdev->pdev, 472 vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
474 sizeof(struct vnic_devcmd_fw_info), 473 sizeof(struct vnic_devcmd_fw_info),
475 &vdev->fw_info_pa); 474 &vdev->fw_info_pa, GFP_KERNEL);
476 if (!vdev->fw_info) 475 if (!vdev->fw_info)
477 return -ENOMEM; 476 return -ENOMEM;
478 477
@@ -534,8 +533,8 @@ int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
534 int wait = VNIC_DVCMD_TMO; 533 int wait = VNIC_DVCMD_TMO;
535 534
536 if (!vdev->stats) { 535 if (!vdev->stats) {
537 vdev->stats = pci_alloc_consistent(vdev->pdev, 536 vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
538 sizeof(struct vnic_stats), &vdev->stats_pa); 537 sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
539 if (!vdev->stats) 538 if (!vdev->stats)
540 return -ENOMEM; 539 return -ENOMEM;
541 } 540 }
@@ -607,9 +606,9 @@ int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
607 int wait = VNIC_DVCMD_TMO; 606 int wait = VNIC_DVCMD_TMO;
608 607
609 if (!vdev->notify) { 608 if (!vdev->notify) {
610 vdev->notify = pci_alloc_consistent(vdev->pdev, 609 vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
611 sizeof(struct vnic_devcmd_notify), 610 sizeof(struct vnic_devcmd_notify),
612 &vdev->notify_pa); 611 &vdev->notify_pa, GFP_KERNEL);
613 if (!vdev->notify) 612 if (!vdev->notify)
614 return -ENOMEM; 613 return -ENOMEM;
615 } 614 }
@@ -697,21 +696,21 @@ void svnic_dev_unregister(struct vnic_dev *vdev)
697{ 696{
698 if (vdev) { 697 if (vdev) {
699 if (vdev->notify) 698 if (vdev->notify)
700 pci_free_consistent(vdev->pdev, 699 dma_free_coherent(&vdev->pdev->dev,
701 sizeof(struct vnic_devcmd_notify), 700 sizeof(struct vnic_devcmd_notify),
702 vdev->notify, 701 vdev->notify,
703 vdev->notify_pa); 702 vdev->notify_pa);
704 if (vdev->linkstatus) 703 if (vdev->linkstatus)
705 pci_free_consistent(vdev->pdev, 704 dma_free_coherent(&vdev->pdev->dev,
706 sizeof(u32), 705 sizeof(u32),
707 vdev->linkstatus, 706 vdev->linkstatus,
708 vdev->linkstatus_pa); 707 vdev->linkstatus_pa);
709 if (vdev->stats) 708 if (vdev->stats)
710 pci_free_consistent(vdev->pdev, 709 dma_free_coherent(&vdev->pdev->dev,
711 sizeof(struct vnic_stats), 710 sizeof(struct vnic_stats),
712 vdev->stats, vdev->stats_pa); 711 vdev->stats, vdev->stats_pa);
713 if (vdev->fw_info) 712 if (vdev->fw_info)
714 pci_free_consistent(vdev->pdev, 713 dma_free_coherent(&vdev->pdev->dev,
715 sizeof(struct vnic_devcmd_fw_info), 714 sizeof(struct vnic_devcmd_fw_info),
716 vdev->fw_info, vdev->fw_info_pa); 715 vdev->fw_info, vdev->fw_info_pa);
717 if (vdev->devcmd2) 716 if (vdev->devcmd2)