| author | Mark Nelson <markn@au1.ibm.com> | 2008-07-15 15:51:47 -0400 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-07-21 20:39:32 -0400 |
| commit | 4f3dd8a06239c0a19d772a27c2f618dc2faadf4a (patch) | |
| tree | f7185cd89a1ec2c4bdb356a52a39407e432e7f7d /arch/powerpc/kernel/iommu.c | |
| parent | 4795b7801b07e1b7286edb0d9321433fc0eac6cc (diff) | |
powerpc/dma: Use the struct dma_attrs in iommu code
Update iommu_alloc() to take the struct dma_attrs and pass them on to
tce_build(). This change propagates down to the tce_build functions of
all the platforms.
Signed-off-by: Mark Nelson <markn@au1.ibm.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
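The consumer of the extra argument is the ppc_md.tce_build() hook, so each platform's implementation grows a matching trailing parameter. A minimal sketch of the shape such a backend takes after this change, assuming the void-returning hook of this era; my_platform_tce_build is a hypothetical name and skeleton, not code from this patch:

```c
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <asm/iommu.h>

/* Illustrative platform tce_build backend (not from this patch). */
static void my_platform_tce_build(struct iommu_table *tbl, long index,
				  long npages, unsigned long uaddr,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	/*
	 * Program 'npages' TCE entries starting at 'index' to map 'uaddr'.
	 * The new attrs argument may be NULL (see the iommu_alloc_coherent()
	 * call site in the diff below) and can be queried with dma_get_attr().
	 */
}
```

The real implementations live under the individual platform directories, which is what the commit message means by the change propagating down to all the platforms.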
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r-- | arch/powerpc/kernel/iommu.c | 13 |
1 file changed, 8 insertions(+), 5 deletions(-)
```diff
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 8c68ee9e5d1c..2385f68c1751 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -186,7 +186,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 			      void *page, unsigned int npages,
 			      enum dma_data_direction direction,
-			      unsigned long mask, unsigned int align_order)
+			      unsigned long mask, unsigned int align_order,
+			      struct dma_attrs *attrs)
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
@@ -205,7 +206,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 
 	/* Put the TCEs in the HW table */
 	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
-			 direction);
+			 direction, attrs);
 
 
 	/* Flush/invalidate TLB caches if necessary */
@@ -336,7 +337,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
+		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
+				 direction, attrs);
 
 		/* If we are in an open segment, try merging */
 		if (segstart != s) {
@@ -573,7 +575,8 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
 		align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
 
 	dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-				 mask >> IOMMU_PAGE_SHIFT, align);
+				 mask >> IOMMU_PAGE_SHIFT, align,
+				 attrs);
 	if (dma_handle == DMA_ERROR_CODE) {
 		if (printk_ratelimit()) {
 			printk(KERN_INFO "iommu_alloc failed, "
@@ -642,7 +645,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	nio_pages = size >> IOMMU_PAGE_SHIFT;
 	io_order = get_iommu_order(size);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-			      mask >> IOMMU_PAGE_SHIFT, io_order);
+			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
```
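On the originating side, the attrs that reach iommu_alloc() come in through the dma_*_attrs() entry points wired up earlier in this series. A hedged caller-side sketch, assuming those interfaces; the map_with_attrs helper and the choice of DMA_ATTR_WEAK_ORDERING as the example attribute are illustrative assumptions, not part of this patch:

```c
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative caller (not from this patch): on the IOMMU DMA ops path the
 * attribute set built here travels through iommu_map_single() into
 * iommu_alloc() and finally reaches ppc_md.tce_build() as the new argument.
 */
static dma_addr_t map_with_attrs(struct device *dev, void *buf, size_t len)
{
	DEFINE_DMA_ATTRS(attrs);	/* declares and zeroes a struct dma_attrs */

	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
}
```

iommu_alloc_coherent(), by contrast, passes NULL in the hunk above because no caller-supplied attributes exist on the coherent allocation path.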