author		Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>	2008-02-01 17:09:32 -0500
committer	Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>	2008-02-01 17:09:32 -0500
commit		5c05ff68b9a9b40a9be949497e0aa980185565cf (patch)
tree		d4d9a59a0f2d8b6ab3e107d5b15ed0d0709f4997
parent		f8341c1c19730f1869f2f12e30fe56ff4afb4189 (diff)
ide: switch to DMA-mapping API
* pci_map_sg() -> dma_map_sg() in ide_build_sglist().

* pci_unmap_sg() -> dma_unmap_sg() in ide_destroy_dmatable().

There should be no functionality changes caused by this patch except on the
blackfin arch, whose dma_[un]map_sg() implementation differs from its
pci_[un]map_sg() one (on s390 there is no PCI, on avr32 and h8300 PCI is
currently unsupported, on m32r PCI support depends on BROKEN, on m68k PCI
support depends on HADES which in turn depends on BROKEN, and on all other
archs dma_[un]map_sg() matches pci_[un]map_sg()).

The blackfin behavior change was acked by Bryan Wu.

Cc: Bryan Wu <bryan.wu@analog.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
-rw-r--r--	drivers/ide/ide-dma.c	18
1 files changed, 9 insertions, 9 deletions
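In short, the patch trades the PCI-only wrappers for the bus-agnostic
DMA-mapping calls, passing the hwif's struct device directly instead of
converting it to a pci_dev first. The sketch below is illustrative only
(the example_* helpers are hypothetical, not part of the patch) and assumes
nothing beyond a generic struct device *, which is what hwif->dev provides
in the driver:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative only: map a scatterlist for a device-to-memory transfer
 * with the generic API the patch switches to.  The old code called
 * pci_map_sg(to_pci_dev(dev), ...) with PCI_DMA_FROMDEVICE instead. */
static int example_map_for_read(struct device *dev, struct scatterlist *sg,
				int nents)
{
	return dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
}

/* Illustrative only: the matching teardown, replacing pci_unmap_sg(). */
static void example_unmap_after_read(struct device *dev,
				     struct scatterlist *sg, int nents)
{
	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
}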
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 782e5da01578..ec7c5c8dc698 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -85,6 +85,7 @@
 #include <linux/ide.h>
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -175,26 +176,26 @@ static int ide_dma_good_drive(ide_drive_t *drive)
  *	@drive: the drive to build the DMA table for
  *	@rq: the request holding the sg list
  *
- *	Perform the PCI mapping magic necessary to access the source or
- *	target buffers of a request via PCI DMA. The lower layers of the
+ *	Perform the DMA mapping magic necessary to access the source or
+ *	target buffers of a request via DMA. The lower layers of the
  *	kernel provide the necessary cache management so that we can
- *	operate in a portable fashion
+ *	operate in a portable fashion.
  */
 
 int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 {
 	ide_hwif_t *hwif = HWIF(drive);
-	struct pci_dev *pdev = to_pci_dev(hwif->dev);
 	struct scatterlist *sg = hwif->sg_table;
 
 	ide_map_sg(drive, rq);
 
 	if (rq_data_dir(rq) == READ)
-		hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
+		hwif->sg_dma_direction = DMA_FROM_DEVICE;
 	else
-		hwif->sg_dma_direction = PCI_DMA_TODEVICE;
 
-	return pci_map_sg(pdev, sg, hwif->sg_nents, hwif->sg_dma_direction);
+		hwif->sg_dma_direction = DMA_TO_DEVICE;
 
+	return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
+			  hwif->sg_dma_direction);
 }
 
 EXPORT_SYMBOL_GPL(ide_build_sglist);
@@ -308,9 +309,8 @@ EXPORT_SYMBOL_GPL(ide_build_dmatable);
 void ide_destroy_dmatable (ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	struct pci_dev *pdev = to_pci_dev(hwif->dev);
 
-	pci_unmap_sg(pdev, hwif->sg_table, hwif->sg_nents,
+	dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
 		     hwif->sg_dma_direction);
 }
 
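The direction constants translate one-for-one as well: PCI_DMA_FROMDEVICE
becomes DMA_FROM_DEVICE and PCI_DMA_TODEVICE becomes DMA_TO_DEVICE. A
hypothetical helper (not part of the patch) sketching how the patched
ide_build_sglist() derives the direction from the block-layer request:

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

/* Illustrative only: choose the DMA direction from the request's data
 * direction, mirroring the logic in the patched ide_build_sglist(). */
static enum dma_data_direction example_rq_dma_dir(struct request *rq)
{
	return (rq_data_dir(rq) == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}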