author     Christoph Hellwig <hch@lst.de>  2018-10-10 13:53:14 -0400
committer  Martin K. Petersen <martin.petersen@oracle.com>  2018-10-17 21:58:52 -0400
commit     ab8e7f4bdfeac57074c8a8a9ca12bcd101fdf1ca (patch)
tree       2073c15736c14417b17a50d10af8ea31688733fe
parent     1c2048bdc3f4ff3337613c27519cf608916e95a9 (diff)
scsi: mvumi: switch to generic DMA API
Switch from the legacy PCI DMA API to the generic DMA API. Also reuse an existing helper (after fixing the error return) to set the DMA mask instead of having three copies of the code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--  drivers/scsi/mvumi.c  89
1 file changed, 36 insertions(+), 53 deletions(-)
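
For readers unfamiliar with the two interfaces, every hunk below applies the same substitution: pci_*_consistent(), pci_map_sg()/pci_unmap_sg() and pci_set_dma_mask() calls against the struct pci_dev become dma_*_coherent(), dma_map_sg()/dma_unmap_sg() and dma_set_mask() calls against its embedded struct device, with the allocation flags made explicit. The snippet below is a minimal sketch of that pattern, not code taken from the patch; the demo_dev structure and demo_* functions are invented for illustration, and only the dma_* calls are the generic DMA API the patch switches to.

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    #define DEMO_BUF_SIZE 4096            /* arbitrary size for the sketch */

    struct demo_dev {                     /* hypothetical driver context */
            struct pci_dev *pdev;
            void *buf;
            dma_addr_t buf_dma;
    };

    static int demo_setup_dma(struct demo_dev *d)
    {
            int ret;

            /* was: pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) with 32-bit fallback */
            ret = dma_set_mask(&d->pdev->dev, DMA_BIT_MASK(64));
            if (ret)
                    ret = dma_set_mask(&d->pdev->dev, DMA_BIT_MASK(32));
            if (ret)
                    return ret;

            /*
             * was: pci_zalloc_consistent(pdev, size, &dma), which always used
             * GFP_ATOMIC; the generic call takes an explicit gfp_t, GFP_KERNEL
             * in this patch.
             */
            d->buf = dma_zalloc_coherent(&d->pdev->dev, DEMO_BUF_SIZE,
                                         &d->buf_dma, GFP_KERNEL);
            if (!d->buf)
                    return -ENOMEM;
            return 0;
    }

    static void demo_teardown_dma(struct demo_dev *d)
    {
            /* was: pci_free_consistent(pdev, size, buf, dma) */
            dma_free_coherent(&d->pdev->dev, DEMO_BUF_SIZE, d->buf, d->buf_dma);
    }

For the mask setting specifically, the patch also folds the three open-coded 64/32-bit fallbacks in the driver into the existing mvumi_pci_set_master() helper, changing its return type from unsigned int to int so a negative error from dma_set_mask() can propagate to the callers.
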
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index b3cd9a6b1d30..2458974d1af6 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -143,8 +143,8 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 
         case RESOURCE_UNCACHED_MEMORY:
                 size = round_up(size, 8);
-                res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
-                                                        &res->bus_addr);
+                res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
+                                &res->bus_addr, GFP_KERNEL);
                 if (!res->virt_addr) {
                         dev_err(&mhba->pdev->dev,
                                         "unable to allocate consistent mem,"
@@ -175,7 +175,7 @@ static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
         list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
                 switch (res->type) {
                 case RESOURCE_UNCACHED_MEMORY:
-                        pci_free_consistent(mhba->pdev, res->size,
+                        dma_free_coherent(&mhba->pdev->dev, res->size,
                                                 res->virt_addr, res->bus_addr);
                         break;
                 case RESOURCE_CACHED_MEMORY:
@@ -211,14 +211,14 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
         dma_addr_t busaddr;
 
         sg = scsi_sglist(scmd);
-        *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
-                               (int) scmd->sc_data_direction);
+        *sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
+                               scmd->sc_data_direction);
         if (*sg_count > mhba->max_sge) {
                 dev_err(&mhba->pdev->dev,
                         "sg count[0x%x] is bigger than max sg[0x%x].\n",
                         *sg_count, mhba->max_sge);
-                pci_unmap_sg(mhba->pdev, sg, sgnum,
-                             (int) scmd->sc_data_direction);
+                dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
+                             scmd->sc_data_direction);
                 return -1;
         }
         for (i = 0; i < *sg_count; i++) {
@@ -246,7 +246,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
         if (size == 0)
                 return 0;
 
-        virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
+        virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+                        GFP_KERNEL);
         if (!virt_addr)
                 return -1;
 
@@ -274,8 +275,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
         }
         INIT_LIST_HEAD(&cmd->queue_pointer);
 
-        cmd->frame = pci_alloc_consistent(mhba->pdev,
-                        mhba->ib_max_size, &cmd->frame_phys);
+        cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
+                        &cmd->frame_phys, GFP_KERNEL);
         if (!cmd->frame) {
                 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
                         " frame,size = %d.\n", mhba->ib_max_size);
@@ -287,7 +288,7 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
         if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
                 dev_err(&mhba->pdev->dev, "failed to allocate memory"
                         " for internal frame\n");
-                pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+                dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
                                 cmd->frame, cmd->frame_phys);
                 kfree(cmd);
                 return NULL;
@@ -313,10 +314,10 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
                 phy_addr = (dma_addr_t) m_sg->baseaddr_l |
                         (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 
-                pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+                dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
                                                                 phy_addr);
         }
-        pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+        dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
                         cmd->frame, cmd->frame_phys);
         kfree(cmd);
 }
@@ -663,16 +664,17 @@ static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
         }
 }
 
-static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
+static int mvumi_pci_set_master(struct pci_dev *pdev)
 {
-        unsigned int ret = 0;
+        int ret = 0;
+
         pci_set_master(pdev);
 
         if (IS_DMA64) {
-                if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
-                        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+                        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
         } else
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 
         return ret;
 }
@@ -771,7 +773,7 @@ static void mvumi_release_fw(struct mvumi_hba *mhba)
         mvumi_free_cmds(mhba);
         mvumi_release_mem_resource(mhba);
         mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
-        pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+        dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
                 mhba->handshake_page, mhba->handshake_page_phys);
         kfree(mhba->regs);
         pci_release_regions(mhba->pdev);
@@ -1339,9 +1341,9 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
         }
 
         if (scsi_bufflen(scmd))
-                pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+                dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
                              scsi_sg_count(scmd),
-                             (int) scmd->sc_data_direction);
+                             scmd->sc_data_direction);
         cmd->scmd->scsi_done(scmd);
         mvumi_return_cmd(mhba, cmd);
 }
@@ -2148,9 +2150,9 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
         scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
         scmd->SCp.ptr = NULL;
         if (scsi_bufflen(scmd)) {
-                pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+                dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
                              scsi_sg_count(scmd),
-                             (int)scmd->sc_data_direction);
+                             scmd->sc_data_direction);
         }
         mvumi_return_cmd(mhba, cmd);
         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
@@ -2362,8 +2364,8 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
                 ret = -ENOMEM;
                 goto fail_alloc_mem;
         }
-        mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
-                &mhba->handshake_page_phys);
+        mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
+                HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
         if (!mhba->handshake_page) {
                 dev_err(&mhba->pdev->dev,
                         "failed to allocate memory for handshake\n");
@@ -2383,7 +2385,7 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
 
 fail_ready_state:
         mvumi_release_mem_resource(mhba);
-        pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+        dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
                 mhba->handshake_page, mhba->handshake_page_phys);
 fail_alloc_page:
         kfree(mhba->regs);
@@ -2480,20 +2482,9 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         if (ret)
                 return ret;
 
-        pci_set_master(pdev);
-
-        if (IS_DMA64) {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (ret) {
-                        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                        if (ret)
-                                goto fail_set_dma_mask;
-                }
-        } else {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                if (ret)
-                        goto fail_set_dma_mask;
-        }
+        ret = mvumi_pci_set_master(pdev);
+        if (ret)
+                goto fail_set_dma_mask;
 
         host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
         if (!host) {
@@ -2627,19 +2618,11 @@ static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
                 dev_err(&pdev->dev, "enable device failed\n");
                 return ret;
         }
-        pci_set_master(pdev);
-        if (IS_DMA64) {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (ret) {
-                        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                        if (ret)
-                                goto fail;
-                }
-        } else {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                if (ret)
-                        goto fail;
-        }
+
+        ret = mvumi_pci_set_master(pdev);
+        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+        if (ret)
+                goto fail;
         ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
         if (ret)
                 goto fail;