diff options
author | Christoph Hellwig <hch@lst.de> | 2018-10-10 14:22:40 -0400 |
---|---|---|
committer | Martin K. Petersen <martin.petersen@oracle.com> | 2018-10-17 21:58:53 -0400 |
commit | 9b7ca6c24cb4bef2a704f34b4e11c471360d02a0 (patch) | |
tree | 5119965598998723711ac08d0ee533dda5f65a5e | |
parent | cecfed31fda849767799e5521064796a21c5164c (diff) |
scsi: vmw_pscsi: switch to generic DMA API
Switch from the legacy PCI DMA API to the generic DMA API.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r-- | drivers/scsi/vmw_pvscsi.c | 77 |
1 file changed, 38 insertions, 39 deletions
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 0cd947f78b5b..6e491023fdd8 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c | |||
@@ -372,9 +372,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, | |||
372 | pvscsi_create_sg(ctx, sg, segs); | 372 | pvscsi_create_sg(ctx, sg, segs); |
373 | 373 | ||
374 | e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; | 374 | e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; |
375 | ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl, | 375 | ctx->sglPA = dma_map_single(&adapter->dev->dev, |
376 | SGL_SIZE, PCI_DMA_TODEVICE); | 376 | ctx->sgl, SGL_SIZE, DMA_TO_DEVICE); |
377 | if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) { | 377 | if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) { |
378 | scmd_printk(KERN_ERR, cmd, | 378 | scmd_printk(KERN_ERR, cmd, |
379 | "vmw_pvscsi: Failed to map ctx sglist for DMA.\n"); | 379 | "vmw_pvscsi: Failed to map ctx sglist for DMA.\n"); |
380 | scsi_dma_unmap(cmd); | 380 | scsi_dma_unmap(cmd); |
@@ -389,9 +389,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, | |||
389 | * In case there is no S/G list, scsi_sglist points | 389 | * In case there is no S/G list, scsi_sglist points |
390 | * directly to the buffer. | 390 | * directly to the buffer. |
391 | */ | 391 | */ |
392 | ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen, | 392 | ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen, |
393 | cmd->sc_data_direction); | 393 | cmd->sc_data_direction); |
394 | if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) { | 394 | if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) { |
395 | scmd_printk(KERN_ERR, cmd, | 395 | scmd_printk(KERN_ERR, cmd, |
396 | "vmw_pvscsi: Failed to map direct data buffer for DMA.\n"); | 396 | "vmw_pvscsi: Failed to map direct data buffer for DMA.\n"); |
397 | return -ENOMEM; | 397 | return -ENOMEM; |
@@ -417,23 +417,23 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter, | |||
417 | if (count != 0) { | 417 | if (count != 0) { |
418 | scsi_dma_unmap(cmd); | 418 | scsi_dma_unmap(cmd); |
419 | if (ctx->sglPA) { | 419 | if (ctx->sglPA) { |
420 | pci_unmap_single(adapter->dev, ctx->sglPA, | 420 | dma_unmap_single(&adapter->dev->dev, ctx->sglPA, |
421 | SGL_SIZE, PCI_DMA_TODEVICE); | 421 | SGL_SIZE, DMA_TO_DEVICE); |
422 | ctx->sglPA = 0; | 422 | ctx->sglPA = 0; |
423 | } | 423 | } |
424 | } else | 424 | } else |
425 | pci_unmap_single(adapter->dev, ctx->dataPA, bufflen, | 425 | dma_unmap_single(&adapter->dev->dev, ctx->dataPA, |
426 | cmd->sc_data_direction); | 426 | bufflen, cmd->sc_data_direction); |
427 | } | 427 | } |
428 | if (cmd->sense_buffer) | 428 | if (cmd->sense_buffer) |
429 | pci_unmap_single(adapter->dev, ctx->sensePA, | 429 | dma_unmap_single(&adapter->dev->dev, ctx->sensePA, |
430 | SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); | 430 | SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); |
431 | } | 431 | } |
432 | 432 | ||
433 | static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter) | 433 | static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter) |
434 | { | 434 | { |
435 | adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE, | 435 | adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE, |
436 | &adapter->ringStatePA); | 436 | &adapter->ringStatePA, GFP_KERNEL); |
437 | if (!adapter->rings_state) | 437 | if (!adapter->rings_state) |
438 | return -ENOMEM; | 438 | return -ENOMEM; |
439 | 439 | ||
@@ -441,17 +441,17 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter) | |||
441 | pvscsi_ring_pages); | 441 | pvscsi_ring_pages); |
442 | adapter->req_depth = adapter->req_pages | 442 | adapter->req_depth = adapter->req_pages |
443 | * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; | 443 | * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; |
444 | adapter->req_ring = pci_alloc_consistent(adapter->dev, | 444 | adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev, |
445 | adapter->req_pages * PAGE_SIZE, | 445 | adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA, |
446 | &adapter->reqRingPA); | 446 | GFP_KERNEL); |
447 | if (!adapter->req_ring) | 447 | if (!adapter->req_ring) |
448 | return -ENOMEM; | 448 | return -ENOMEM; |
449 | 449 | ||
450 | adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING, | 450 | adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING, |
451 | pvscsi_ring_pages); | 451 | pvscsi_ring_pages); |
452 | adapter->cmp_ring = pci_alloc_consistent(adapter->dev, | 452 | adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev, |
453 | adapter->cmp_pages * PAGE_SIZE, | 453 | adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA, |
454 | &adapter->cmpRingPA); | 454 | GFP_KERNEL); |
455 | if (!adapter->cmp_ring) | 455 | if (!adapter->cmp_ring) |
456 | return -ENOMEM; | 456 | return -ENOMEM; |
457 | 457 | ||
@@ -464,9 +464,9 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter) | |||
464 | 464 | ||
465 | adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING, | 465 | adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING, |
466 | pvscsi_msg_ring_pages); | 466 | pvscsi_msg_ring_pages); |
467 | adapter->msg_ring = pci_alloc_consistent(adapter->dev, | 467 | adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev, |
468 | adapter->msg_pages * PAGE_SIZE, | 468 | adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA, |
469 | &adapter->msgRingPA); | 469 | GFP_KERNEL); |
470 | if (!adapter->msg_ring) | 470 | if (!adapter->msg_ring) |
471 | return -ENOMEM; | 471 | return -ENOMEM; |
472 | BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE)); | 472 | BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE)); |
@@ -708,10 +708,10 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, | |||
708 | e->lun[1] = sdev->lun; | 708 | e->lun[1] = sdev->lun; |
709 | 709 | ||
710 | if (cmd->sense_buffer) { | 710 | if (cmd->sense_buffer) { |
711 | ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer, | 711 | ctx->sensePA = dma_map_single(&adapter->dev->dev, |
712 | SCSI_SENSE_BUFFERSIZE, | 712 | cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, |
713 | PCI_DMA_FROMDEVICE); | 713 | DMA_FROM_DEVICE); |
714 | if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) { | 714 | if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) { |
715 | scmd_printk(KERN_ERR, cmd, | 715 | scmd_printk(KERN_ERR, cmd, |
716 | "vmw_pvscsi: Failed to map sense buffer for DMA.\n"); | 716 | "vmw_pvscsi: Failed to map sense buffer for DMA.\n"); |
717 | ctx->sensePA = 0; | 717 | ctx->sensePA = 0; |
@@ -740,9 +740,9 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, | |||
740 | 740 | ||
741 | if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) { | 741 | if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) { |
742 | if (cmd->sense_buffer) { | 742 | if (cmd->sense_buffer) { |
743 | pci_unmap_single(adapter->dev, ctx->sensePA, | 743 | dma_unmap_single(&adapter->dev->dev, ctx->sensePA, |
744 | SCSI_SENSE_BUFFERSIZE, | 744 | SCSI_SENSE_BUFFERSIZE, |
745 | PCI_DMA_FROMDEVICE); | 745 | DMA_FROM_DEVICE); |
746 | ctx->sensePA = 0; | 746 | ctx->sensePA = 0; |
747 | } | 747 | } |
748 | return -ENOMEM; | 748 | return -ENOMEM; |
@@ -1218,21 +1218,21 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter) | |||
1218 | } | 1218 | } |
1219 | 1219 | ||
1220 | if (adapter->rings_state) | 1220 | if (adapter->rings_state) |
1221 | pci_free_consistent(adapter->dev, PAGE_SIZE, | 1221 | dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, |
1222 | adapter->rings_state, adapter->ringStatePA); | 1222 | adapter->rings_state, adapter->ringStatePA); |
1223 | 1223 | ||
1224 | if (adapter->req_ring) | 1224 | if (adapter->req_ring) |
1225 | pci_free_consistent(adapter->dev, | 1225 | dma_free_coherent(&adapter->dev->dev, |
1226 | adapter->req_pages * PAGE_SIZE, | 1226 | adapter->req_pages * PAGE_SIZE, |
1227 | adapter->req_ring, adapter->reqRingPA); | 1227 | adapter->req_ring, adapter->reqRingPA); |
1228 | 1228 | ||
1229 | if (adapter->cmp_ring) | 1229 | if (adapter->cmp_ring) |
1230 | pci_free_consistent(adapter->dev, | 1230 | dma_free_coherent(&adapter->dev->dev, |
1231 | adapter->cmp_pages * PAGE_SIZE, | 1231 | adapter->cmp_pages * PAGE_SIZE, |
1232 | adapter->cmp_ring, adapter->cmpRingPA); | 1232 | adapter->cmp_ring, adapter->cmpRingPA); |
1233 | 1233 | ||
1234 | if (adapter->msg_ring) | 1234 | if (adapter->msg_ring) |
1235 | pci_free_consistent(adapter->dev, | 1235 | dma_free_coherent(&adapter->dev->dev, |
1236 | adapter->msg_pages * PAGE_SIZE, | 1236 | adapter->msg_pages * PAGE_SIZE, |
1237 | adapter->msg_ring, adapter->msgRingPA); | 1237 | adapter->msg_ring, adapter->msgRingPA); |
1238 | } | 1238 | } |
@@ -1291,8 +1291,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter) | |||
1291 | u32 numPhys = 16; | 1291 | u32 numPhys = 16; |
1292 | 1292 | ||
1293 | dev = pvscsi_dev(adapter); | 1293 | dev = pvscsi_dev(adapter); |
1294 | config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE, | 1294 | config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE, |
1295 | &configPagePA); | 1295 | &configPagePA, GFP_KERNEL); |
1296 | if (!config_page) { | 1296 | if (!config_page) { |
1297 | dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n"); | 1297 | dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n"); |
1298 | goto exit; | 1298 | goto exit; |
@@ -1326,7 +1326,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter) | |||
1326 | } else | 1326 | } else |
1327 | dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n", | 1327 | dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n", |
1328 | header->hostStatus, header->scsiStatus); | 1328 | header->hostStatus, header->scsiStatus); |
1329 | pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA); | 1329 | dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page, |
1330 | configPagePA); | ||
1330 | exit: | 1331 | exit: |
1331 | return numPhys; | 1332 | return numPhys; |
1332 | } | 1333 | } |
@@ -1346,11 +1347,9 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1346 | if (pci_enable_device(pdev)) | 1347 | if (pci_enable_device(pdev)) |
1347 | return error; | 1348 | return error; |
1348 | 1349 | ||
1349 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 && | 1350 | if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { |
1350 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { | ||
1351 | printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n"); | 1351 | printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n"); |
1352 | } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 && | 1352 | } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { |
1353 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) { | ||
1354 | printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n"); | 1353 | printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n"); |
1355 | } else { | 1354 | } else { |
1356 | printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n"); | 1355 | printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n"); |