summaryrefslogtreecommitdiffstats
path: root/drivers/scsi/pmcraid.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-12-28 17:48:06 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2018-12-28 17:48:06 -0500
commit938edb8a31b976c9a92eb0cd4ff481e93f76c1f1 (patch)
tree0854d5f6859d51032f1d853eaa8ab0e8647fb0cb /drivers/scsi/pmcraid.c
parentaf7ddd8a627c62a835524b3f5b471edbbbcce025 (diff)
parentda7903092b880b25971ca9103cb0b934a44ace2b (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley: "This is mostly update of the usual drivers: smartpqi, lpfc, qedi, megaraid_sas, libsas, zfcp, mpt3sas, hisi_sas. Additionally, we have a pile of annotation, unused variable and minor updates. The big API change is the updates for Christoph's DMA rework which include removing the DISABLE_CLUSTERING flag. And finally there are a couple of target tree updates" * tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (259 commits) scsi: isci: request: mark expected switch fall-through scsi: isci: remote_node_context: mark expected switch fall-throughs scsi: isci: remote_device: Mark expected switch fall-throughs scsi: isci: phy: Mark expected switch fall-through scsi: iscsi: Capture iscsi debug messages using tracepoints scsi: myrb: Mark expected switch fall-throughs scsi: megaraid: fix out-of-bound array accesses scsi: mpt3sas: mpt3sas_scsih: Mark expected switch fall-through scsi: fcoe: remove set but not used variable 'port' scsi: smartpqi: call pqi_free_interrupts() in pqi_shutdown() scsi: smartpqi: fix build warnings scsi: smartpqi: update driver version scsi: smartpqi: add ofa support scsi: smartpqi: increase fw status register read timeout scsi: smartpqi: bump driver version scsi: smartpqi: add smp_utils support scsi: smartpqi: correct lun reset issues scsi: smartpqi: correct volume status scsi: smartpqi: do not offline disks for transient did no connect conditions scsi: smartpqi: allow for larger raid maps ...
Diffstat (limited to 'drivers/scsi/pmcraid.c')
-rw-r--r--drivers/scsi/pmcraid.c117
1 files changed, 50 insertions, 67 deletions
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4e86994e10e8..7c4673308f5b 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -846,16 +846,9 @@ static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
846 cmd->ioa_cb->ioarcb.cdb[0], ioasc); 846 cmd->ioa_cb->ioarcb.cdb[0], ioasc);
847 } 847 }
848 848
849 /* if we had allocated sense buffers for request sense, copy the sense 849 if (cmd->sense_buffer) {
850 * release the buffers 850 dma_unmap_single(&pinstance->pdev->dev, cmd->sense_buffer_dma,
851 */ 851 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
852 if (cmd->sense_buffer != NULL) {
853 memcpy(scsi_cmd->sense_buffer,
854 cmd->sense_buffer,
855 SCSI_SENSE_BUFFERSIZE);
856 pci_free_consistent(pinstance->pdev,
857 SCSI_SENSE_BUFFERSIZE,
858 cmd->sense_buffer, cmd->sense_buffer_dma);
859 cmd->sense_buffer = NULL; 852 cmd->sense_buffer = NULL;
860 cmd->sense_buffer_dma = 0; 853 cmd->sense_buffer_dma = 0;
861 } 854 }
@@ -2444,13 +2437,12 @@ static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2444{ 2437{
2445 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; 2438 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2446 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl; 2439 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
2440 struct device *dev = &cmd->drv_inst->pdev->dev;
2447 2441
2448 /* allocate DMAable memory for sense buffers */ 2442 cmd->sense_buffer = cmd->scsi_cmd->sense_buffer;
2449 cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev, 2443 cmd->sense_buffer_dma = dma_map_single(dev, cmd->sense_buffer,
2450 SCSI_SENSE_BUFFERSIZE, 2444 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
2451 &cmd->sense_buffer_dma); 2445 if (dma_mapping_error(dev, cmd->sense_buffer_dma)) {
2452
2453 if (cmd->sense_buffer == NULL) {
2454 pmcraid_err 2446 pmcraid_err
2455 ("couldn't allocate sense buffer for request sense\n"); 2447 ("couldn't allocate sense buffer for request sense\n");
2456 pmcraid_erp_done(cmd); 2448 pmcraid_erp_done(cmd);
@@ -2491,17 +2483,15 @@ static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2491/** 2483/**
2492 * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery 2484 * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
2493 * @cmd: command that failed 2485 * @cmd: command that failed
2494 * @sense: true if request_sense is required after cancel all 2486 * @need_sense: true if request_sense is required after cancel all
2495 * 2487 *
2496 * This function sends a cancel all to a device to clear the queue. 2488 * This function sends a cancel all to a device to clear the queue.
2497 */ 2489 */
2498static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense) 2490static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, bool need_sense)
2499{ 2491{
2500 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; 2492 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2501 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; 2493 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2502 struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata; 2494 struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2503 void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
2504 : pmcraid_request_sense;
2505 2495
2506 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); 2496 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2507 ioarcb->request_flags0 = SYNC_OVERRIDE; 2497 ioarcb->request_flags0 = SYNC_OVERRIDE;
@@ -2519,7 +2509,8 @@ static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
2519 /* writing to IOARRIN must be protected by host_lock, as mid-layer 2509 /* writing to IOARRIN must be protected by host_lock, as mid-layer
2520 * schedule queuecommand while we are doing this 2510 * schedule queuecommand while we are doing this
2521 */ 2511 */
2522 pmcraid_send_cmd(cmd, cmd_done, 2512 pmcraid_send_cmd(cmd, need_sense ?
2513 pmcraid_erp_done : pmcraid_request_sense,
2523 PMCRAID_REQUEST_SENSE_TIMEOUT, 2514 PMCRAID_REQUEST_SENSE_TIMEOUT,
2524 pmcraid_timeout_handler); 2515 pmcraid_timeout_handler);
2525} 2516}
@@ -2612,7 +2603,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2612 struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa; 2603 struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2613 u32 ioasc = le32_to_cpu(ioasa->ioasc); 2604 u32 ioasc = le32_to_cpu(ioasa->ioasc);
2614 u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK; 2605 u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
2615 u32 sense_copied = 0; 2606 bool sense_copied = false;
2616 2607
2617 if (!res) { 2608 if (!res) {
2618 pmcraid_info("resource pointer is NULL\n"); 2609 pmcraid_info("resource pointer is NULL\n");
@@ -2684,7 +2675,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2684 memcpy(scsi_cmd->sense_buffer, 2675 memcpy(scsi_cmd->sense_buffer,
2685 ioasa->sense_data, 2676 ioasa->sense_data,
2686 data_size); 2677 data_size);
2687 sense_copied = 1; 2678 sense_copied = true;
2688 } 2679 }
2689 2680
2690 if (RES_IS_GSCSI(res->cfg_entry)) 2681 if (RES_IS_GSCSI(res->cfg_entry))
@@ -3523,7 +3514,7 @@ static int pmcraid_build_passthrough_ioadls(
3523 return -ENOMEM; 3514 return -ENOMEM;
3524 } 3515 }
3525 3516
3526 sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev, 3517 sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
3527 sglist->scatterlist, 3518 sglist->scatterlist,
3528 sglist->num_sg, direction); 3519 sglist->num_sg, direction);
3529 3520
@@ -3572,7 +3563,7 @@ static void pmcraid_release_passthrough_ioadls(
3572 struct pmcraid_sglist *sglist = cmd->sglist; 3563 struct pmcraid_sglist *sglist = cmd->sglist;
3573 3564
3574 if (buflen > 0) { 3565 if (buflen > 0) {
3575 pci_unmap_sg(cmd->drv_inst->pdev, 3566 dma_unmap_sg(&cmd->drv_inst->pdev->dev,
3576 sglist->scatterlist, 3567 sglist->scatterlist,
3577 sglist->num_sg, 3568 sglist->num_sg,
3578 direction); 3569 direction);
@@ -4158,7 +4149,6 @@ static struct scsi_host_template pmcraid_host_template = {
4158 .max_sectors = PMCRAID_IOA_MAX_SECTORS, 4149 .max_sectors = PMCRAID_IOA_MAX_SECTORS,
4159 .no_write_same = 1, 4150 .no_write_same = 1,
4160 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, 4151 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
4161 .use_clustering = ENABLE_CLUSTERING,
4162 .shost_attrs = pmcraid_host_attrs, 4152 .shost_attrs = pmcraid_host_attrs,
4163 .proc_name = PMCRAID_DRIVER_NAME, 4153 .proc_name = PMCRAID_DRIVER_NAME,
4164}; 4154};
@@ -4708,9 +4698,9 @@ static void
4708pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex) 4698pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
4709{ 4699{
4710 int i; 4700 int i;
4711 for (i = 0; i < maxindex; i++) {
4712 4701
4713 pci_free_consistent(pinstance->pdev, 4702 for (i = 0; i < maxindex; i++) {
4703 dma_free_coherent(&pinstance->pdev->dev,
4714 HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD, 4704 HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
4715 pinstance->hrrq_start[i], 4705 pinstance->hrrq_start[i],
4716 pinstance->hrrq_start_bus_addr[i]); 4706 pinstance->hrrq_start_bus_addr[i]);
@@ -4737,11 +4727,9 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4737 4727
4738 for (i = 0; i < pinstance->num_hrrq; i++) { 4728 for (i = 0; i < pinstance->num_hrrq; i++) {
4739 pinstance->hrrq_start[i] = 4729 pinstance->hrrq_start[i] =
4740 pci_alloc_consistent( 4730 dma_alloc_coherent(&pinstance->pdev->dev, buffer_size,
4741 pinstance->pdev, 4731 &pinstance->hrrq_start_bus_addr[i],
4742 buffer_size, 4732 GFP_KERNEL);
4743 &(pinstance->hrrq_start_bus_addr[i]));
4744
4745 if (!pinstance->hrrq_start[i]) { 4733 if (!pinstance->hrrq_start[i]) {
4746 pmcraid_err("pci_alloc failed for hrrq vector : %d\n", 4734 pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
4747 i); 4735 i);
@@ -4770,7 +4758,7 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4770static void pmcraid_release_hcams(struct pmcraid_instance *pinstance) 4758static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4771{ 4759{
4772 if (pinstance->ccn.msg != NULL) { 4760 if (pinstance->ccn.msg != NULL) {
4773 pci_free_consistent(pinstance->pdev, 4761 dma_free_coherent(&pinstance->pdev->dev,
4774 PMCRAID_AEN_HDR_SIZE + 4762 PMCRAID_AEN_HDR_SIZE +
4775 sizeof(struct pmcraid_hcam_ccn_ext), 4763 sizeof(struct pmcraid_hcam_ccn_ext),
4776 pinstance->ccn.msg, 4764 pinstance->ccn.msg,
@@ -4782,7 +4770,7 @@ static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4782 } 4770 }
4783 4771
4784 if (pinstance->ldn.msg != NULL) { 4772 if (pinstance->ldn.msg != NULL) {
4785 pci_free_consistent(pinstance->pdev, 4773 dma_free_coherent(&pinstance->pdev->dev,
4786 PMCRAID_AEN_HDR_SIZE + 4774 PMCRAID_AEN_HDR_SIZE +
4787 sizeof(struct pmcraid_hcam_ldn), 4775 sizeof(struct pmcraid_hcam_ldn),
4788 pinstance->ldn.msg, 4776 pinstance->ldn.msg,
@@ -4803,17 +4791,15 @@ static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4803 */ 4791 */
4804static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance) 4792static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
4805{ 4793{
4806 pinstance->ccn.msg = pci_alloc_consistent( 4794 pinstance->ccn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
4807 pinstance->pdev,
4808 PMCRAID_AEN_HDR_SIZE + 4795 PMCRAID_AEN_HDR_SIZE +
4809 sizeof(struct pmcraid_hcam_ccn_ext), 4796 sizeof(struct pmcraid_hcam_ccn_ext),
4810 &(pinstance->ccn.baddr)); 4797 &pinstance->ccn.baddr, GFP_KERNEL);
4811 4798
4812 pinstance->ldn.msg = pci_alloc_consistent( 4799 pinstance->ldn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
4813 pinstance->pdev,
4814 PMCRAID_AEN_HDR_SIZE + 4800 PMCRAID_AEN_HDR_SIZE +
4815 sizeof(struct pmcraid_hcam_ldn), 4801 sizeof(struct pmcraid_hcam_ldn),
4816 &(pinstance->ldn.baddr)); 4802 &pinstance->ldn.baddr, GFP_KERNEL);
4817 4803
4818 if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) { 4804 if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
4819 pmcraid_release_hcams(pinstance); 4805 pmcraid_release_hcams(pinstance);
@@ -4841,7 +4827,7 @@ static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
4841{ 4827{
4842 if (pinstance->cfg_table != NULL && 4828 if (pinstance->cfg_table != NULL &&
4843 pinstance->cfg_table_bus_addr != 0) { 4829 pinstance->cfg_table_bus_addr != 0) {
4844 pci_free_consistent(pinstance->pdev, 4830 dma_free_coherent(&pinstance->pdev->dev,
4845 sizeof(struct pmcraid_config_table), 4831 sizeof(struct pmcraid_config_table),
4846 pinstance->cfg_table, 4832 pinstance->cfg_table,
4847 pinstance->cfg_table_bus_addr); 4833 pinstance->cfg_table_bus_addr);
@@ -4886,10 +4872,10 @@ static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
4886 list_add_tail(&pinstance->res_entries[i].queue, 4872 list_add_tail(&pinstance->res_entries[i].queue,
4887 &pinstance->free_res_q); 4873 &pinstance->free_res_q);
4888 4874
4889 pinstance->cfg_table = 4875 pinstance->cfg_table = dma_alloc_coherent(&pinstance->pdev->dev,
4890 pci_alloc_consistent(pinstance->pdev,
4891 sizeof(struct pmcraid_config_table), 4876 sizeof(struct pmcraid_config_table),
4892 &pinstance->cfg_table_bus_addr); 4877 &pinstance->cfg_table_bus_addr,
4878 GFP_KERNEL);
4893 4879
4894 if (NULL == pinstance->cfg_table) { 4880 if (NULL == pinstance->cfg_table) {
4895 pmcraid_err("couldn't alloc DMA memory for config table\n"); 4881 pmcraid_err("couldn't alloc DMA memory for config table\n");
@@ -4954,7 +4940,7 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4954 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); 4940 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4955 4941
4956 if (pinstance->inq_data != NULL) { 4942 if (pinstance->inq_data != NULL) {
4957 pci_free_consistent(pinstance->pdev, 4943 dma_free_coherent(&pinstance->pdev->dev,
4958 sizeof(struct pmcraid_inquiry_data), 4944 sizeof(struct pmcraid_inquiry_data),
4959 pinstance->inq_data, 4945 pinstance->inq_data,
4960 pinstance->inq_data_baddr); 4946 pinstance->inq_data_baddr);
@@ -4964,7 +4950,7 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4964 } 4950 }
4965 4951
4966 if (pinstance->timestamp_data != NULL) { 4952 if (pinstance->timestamp_data != NULL) {
4967 pci_free_consistent(pinstance->pdev, 4953 dma_free_coherent(&pinstance->pdev->dev,
4968 sizeof(struct pmcraid_timestamp_data), 4954 sizeof(struct pmcraid_timestamp_data),
4969 pinstance->timestamp_data, 4955 pinstance->timestamp_data,
4970 pinstance->timestamp_data_baddr); 4956 pinstance->timestamp_data_baddr);
@@ -4981,8 +4967,8 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4981 * This routine pre-allocates memory based on the type of block as below: 4967 * This routine pre-allocates memory based on the type of block as below:
4982 * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator, 4968 * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
4983 * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator 4969 * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator
4984 * config-table entries : DMAable memory using pci_alloc_consistent 4970 * config-table entries : DMAable memory using dma_alloc_coherent
4985 * HostRRQs : DMAable memory, using pci_alloc_consistent 4971 * HostRRQs : DMAable memory, using dma_alloc_coherent
4986 * 4972 *
4987 * Return Value 4973 * Return Value
4988 * 0 in case all of the blocks are allocated, -ENOMEM otherwise. 4974 * 0 in case all of the blocks are allocated, -ENOMEM otherwise.
@@ -5019,11 +5005,9 @@ static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
5019 } 5005 }
5020 5006
5021 /* allocate DMAable memory for page D0 INQUIRY buffer */ 5007 /* allocate DMAable memory for page D0 INQUIRY buffer */
5022 pinstance->inq_data = pci_alloc_consistent( 5008 pinstance->inq_data = dma_alloc_coherent(&pinstance->pdev->dev,
5023 pinstance->pdev,
5024 sizeof(struct pmcraid_inquiry_data), 5009 sizeof(struct pmcraid_inquiry_data),
5025 &pinstance->inq_data_baddr); 5010 &pinstance->inq_data_baddr, GFP_KERNEL);
5026
5027 if (pinstance->inq_data == NULL) { 5011 if (pinstance->inq_data == NULL) {
5028 pmcraid_err("couldn't allocate DMA memory for INQUIRY\n"); 5012 pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
5029 pmcraid_release_buffers(pinstance); 5013 pmcraid_release_buffers(pinstance);
@@ -5031,11 +5015,10 @@ static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
5031 } 5015 }
5032 5016
5033 /* allocate DMAable memory for set timestamp data buffer */ 5017 /* allocate DMAable memory for set timestamp data buffer */
5034 pinstance->timestamp_data = pci_alloc_consistent( 5018 pinstance->timestamp_data = dma_alloc_coherent(&pinstance->pdev->dev,
5035 pinstance->pdev,
5036 sizeof(struct pmcraid_timestamp_data), 5019 sizeof(struct pmcraid_timestamp_data),
5037 &pinstance->timestamp_data_baddr); 5020 &pinstance->timestamp_data_baddr,
5038 5021 GFP_KERNEL);
5039 if (pinstance->timestamp_data == NULL) { 5022 if (pinstance->timestamp_data == NULL) {
5040 pmcraid_err("couldn't allocate DMA memory for \ 5023 pmcraid_err("couldn't allocate DMA memory for \
5041 set time_stamp \n"); 5024 set time_stamp \n");
@@ -5324,12 +5307,12 @@ static int pmcraid_resume(struct pci_dev *pdev)
5324 5307
5325 pci_set_master(pdev); 5308 pci_set_master(pdev);
5326 5309
5327 if ((sizeof(dma_addr_t) == 4) || 5310 if (sizeof(dma_addr_t) == 4 ||
5328 pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 5311 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
5329 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 5312 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
5330 5313
5331 if (rc == 0) 5314 if (rc == 0)
5332 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5315 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5333 5316
5334 if (rc != 0) { 5317 if (rc != 0) {
5335 dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n"); 5318 dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
@@ -5733,19 +5716,19 @@ static int pmcraid_probe(struct pci_dev *pdev,
5733 /* Firmware requires the system bus address of IOARCB to be within 5716 /* Firmware requires the system bus address of IOARCB to be within
5734 * 32-bit addressable range though it has 64-bit IOARRIN register. 5717 * 32-bit addressable range though it has 64-bit IOARRIN register.
5735 * However, firmware supports 64-bit streaming DMA buffers, whereas 5718 * However, firmware supports 64-bit streaming DMA buffers, whereas
5736 * coherent buffers are to be 32-bit. Since pci_alloc_consistent always 5719 * coherent buffers are to be 32-bit. Since dma_alloc_coherent always
5737 * returns memory within 4GB (if not, change this logic), coherent 5720 * returns memory within 4GB (if not, change this logic), coherent
5738 * buffers are within firmware acceptable address ranges. 5721 * buffers are within firmware acceptable address ranges.
5739 */ 5722 */
5740 if ((sizeof(dma_addr_t) == 4) || 5723 if (sizeof(dma_addr_t) == 4 ||
5741 pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 5724 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
5742 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 5725 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
5743 5726
5744 /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32 5727 /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
5745 * bit mask for pci_alloc_consistent to return addresses within 4GB 5728 * bit mask for dma_alloc_coherent to return addresses within 4GB
5746 */ 5729 */
5747 if (rc == 0) 5730 if (rc == 0)
5748 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5731 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5749 5732
5750 if (rc != 0) { 5733 if (rc != 0) {
5751 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); 5734 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");