author     Christoph Hellwig <hch@lst.de>    2018-05-09 10:01:00 -0400
committer  Tejun Heo <tj@kernel.org>         2018-05-10 14:37:28 -0400
commit     258c9fded4d4024fbe5fae7739d5d159f3f69697 (patch)
tree       3386d7c2841c8f5fa4cfacc4a4dca26e1113754c /drivers/ata
parent     dc85ca573b95e99d325ab9fbd430c52c6f67501b (diff)
sata_nv: don't use block layer bounce buffer
sata_nv sets the block layer bounce limit to the reduced DMA mask for ATAPI
devices. The IOMMU or swiotlb already takes care of any bounce buffering
needed at DMA-mapping time, so the block layer bouncing can be removed.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Tejun Heo <tj@kernel.org>
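As a rough illustration of the pattern this patch moves to (a hypothetical sketch, not code from sata_nv.c; the helper name and parameters are made up): the slave-config path only sets the shared PCI device's DMA mask, 32-bit whenever either port runs ATAPI, and no longer calls blk_queue_bounce_limit(), because pages above the mask are bounced by the IOMMU or swiotlb when the request is DMA-mapped.

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/types.h>

/* Illustrative helper, not part of sata_nv.c. */
static int example_set_port_dma_mask(struct pci_dev *pdev, bool any_atapi,
				     u64 full_mask)
{
	/* ATAPI on either port forces the shared PCI function to 32-bit DMA. */
	u64 mask = any_atapi ? DMA_BIT_MASK(32) : full_mask;

	/*
	 * No blk_queue_bounce_limit() call: if a request carries pages above
	 * this mask, the IOMMU or swiotlb bounces them at dma_map time.
	 */
	return dma_set_mask(&pdev->dev, mask);
}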
Diffstat (limited to 'drivers/ata')
-rw-r--r--   drivers/ata/sata_nv.c   62
1 file changed, 24 insertions(+), 38 deletions(-)
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 8c683ddd0f58..b6e9ad6d33c9 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -740,32 +740,16 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 	sdev1 = ap->host->ports[1]->link.device[0].sdev;
 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
-		/** We have to set the DMA mask to 32-bit if either port is in
-		    ATAPI mode, since they are on the same PCI device which is
-		    used for DMA mapping. If we set the mask we also need to set
-		    the bounce limit on both ports to ensure that the block
-		    layer doesn't feed addresses that cause DMA mapping to
-		    choke. If either SCSI device is not allocated yet, it's OK
-		    since that port will discover its correct setting when it
-		    does get allocated.
-		    Note: Setting 32-bit mask should not fail. */
-		if (sdev0)
-			blk_queue_bounce_limit(sdev0->request_queue,
-					       ATA_DMA_MASK);
-		if (sdev1)
-			blk_queue_bounce_limit(sdev1->request_queue,
-					       ATA_DMA_MASK);
-
-		dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+		/*
+		 * We have to set the DMA mask to 32-bit if either port is in
+		 * ATAPI mode, since they are on the same PCI device which is
+		 * used for DMA mapping. If either SCSI device is not allocated
+		 * yet, it's OK since that port will discover its correct
+		 * setting when it does get allocated.
+		 */
+		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 	} else {
-		/** This shouldn't fail as it was set to this value before */
-		dma_set_mask(&pdev->dev, pp->adma_dma_mask);
-		if (sdev0)
-			blk_queue_bounce_limit(sdev0->request_queue,
-					       pp->adma_dma_mask);
-		if (sdev1)
-			blk_queue_bounce_limit(sdev1->request_queue,
-					       pp->adma_dma_mask);
+		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
 	}
 
 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
@@ -1131,12 +1115,11 @@ static int nv_adma_port_start(struct ata_port *ap)
 
 	VPRINTK("ENTER\n");
 
-	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
-	   pad buffers */
-	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-	if (rc)
-		return rc;
-	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	/*
+	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
+	 * pad buffers.
+	 */
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc)
 		return rc;
 
@@ -1156,13 +1139,16 @@ static int nv_adma_port_start(struct ata_port *ap)
 	pp->notifier_clear_block = pp->gen_block +
 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
 
-	/* Now that the legacy PRD and padding buffer are allocated we can
-	   safely raise the DMA mask to allocate the CPB/APRD table.
-	   These are allowed to fail since we store the value that ends up
-	   being used to set as the bounce limit in slave_config later if
-	   needed. */
-	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
-	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	/*
+	 * Now that the legacy PRD and padding buffer are allocated we can
+	 * try to raise the DMA mask to allocate the CPB/APRD table.
+	 */
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc)
+			return rc;
+	}
 	pp->adma_dma_mask = *dev->dma_mask;
 
 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
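The last hunk also changes the port-start path from two unchecked dma_set_mask()/dma_set_coherent_mask() calls to a checked try-64-bit-then-fall-back-to-32-bit sequence. A condensed, hypothetical sketch of just that pattern (the helper name is made up; the call sequence mirrors the patch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative helper, not part of sata_nv.c. */
static int example_raise_dma_mask(struct pci_dev *pdev)
{
	int rc;

	/* Prefer 64-bit streaming and coherent DMA for the CPB/APRD table... */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		/* ...fall back to 32-bit, and only fail if even that is rejected. */
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			return rc;
	}
	return 0;
}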