author		Mark Nelson <markn@au1.ibm.com>		2008-10-27 16:38:08 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-10-31 01:13:48 -0400
commit		f9226d572d2f8b5f564596db8c6a13e458c46191
tree		bff17e54e92bfeea78f8a86181963db7de9a5549 /arch/powerpc/platforms/cell/iommu.c
parent		b30115ea8f685bcd1769553fe8511745f985053c
powerpc: Update remaining dma_mapping_ops to use map/unmap_page
After the merge of the 32-bit and 64-bit DMA code, dma_direct_ops lost
its map/unmap_single() functions but gained map/unmap_page(). This
caused a problem for Cell, because Cell's dma_iommu_fixed_ops called
dma_direct_ops if the fixed linear mapping was to be used, or the
iommu ops if the dynamic window was to be used. To fix this, we need
to update the 64-bit DMA code to use map/unmap_page().
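For illustration, here is the broken path, condensed from the removed
lines in the diff below with comments added: after the merge,
dma_direct_ops leaves .map_single unset, so the first branch calls
through a NULL function pointer.

	static dma_addr_t dma_fixed_map_single(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction direction,
					       struct dma_attrs *attrs)
	{
		if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
			/* fixed linear mapping: dma_direct_ops no longer
			 * provides .map_single, so this oopses */
			return dma_direct_ops.map_single(dev, ptr, size, direction,
							 attrs);
		else
			/* dynamic window: map through the Cell iommu */
			return iommu_map_single(dev, cell_get_iommu_table(dev), ptr,
						size, device_to_mask(dev), direction,
						attrs);
	}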
First, we update the generic IOMMU code so that iommu_map_single()
becomes iommu_map_page() and iommu_unmap_single() becomes
iommu_unmap_page(). Then we propagate these changes up through all
the callers of these two functions, and in the process update all the
dma_mapping_ops so that they have map/unmap_page rather than
map/unmap_single. We can do this because on 64-bit there is no HIGHMEM
memory, so map/unmap_page ends up performing exactly the same function
as map/unmap_single, just taking different arguments.
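The equivalence is straightforward: without HIGHMEM, every kernel
virtual address resolves to a lowmem page plus an offset, so any
map_single() call can be rewritten as a map_page() call. A minimal
sketch (illustrative, not verbatim kernel code):

	/* ops->map_single(dev, ptr, size, dir, attrs) becomes: */
	struct page *page = virt_to_page(ptr);
	unsigned long offset = offset_in_page(ptr);	/* ptr & ~PAGE_MASK */
	dma_addr_t handle = ops->map_page(dev, page, offset, size, dir, attrs);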
This has no effect on drivers, because dma_map_single_attrs() just
ends up calling the map_page() function of the appropriate
dma_mapping_ops, and dma_unmap_single_attrs() similarly calls
unmap_page().
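For reference, the driver-facing wrapper reduces to roughly the
following (a sketch of powerpc's dma_map_single_attrs() after this
change, not a verbatim copy):

	static inline dma_addr_t dma_map_single_attrs(struct device *dev,
						      void *cpu_addr, size_t size,
						      enum dma_data_direction direction,
						      struct dma_attrs *attrs)
	{
		struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

		/* split the virtual address into (page, offset) and hand
		 * it to the per-bus map_page() hook */
		return dma_ops->map_page(dev, virt_to_page(cpu_addr),
					 (unsigned long)cpu_addr % PAGE_SIZE,
					 size, direction, attrs);
	}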
This fixes an oops on Cell blades, which would otherwise oops on boot
because they call dma_direct_ops.map_single, which is now NULL.
Signed-off-by: Mark Nelson <markn@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/iommu.c')
 arch/powerpc/platforms/cell/iommu.c | 37 ++++++++++++++++++-------------------
 1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index ef92e7146215..3168272ab0d7 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -593,31 +593,30 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size,
 	dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
 }
 
-static dma_addr_t dma_fixed_map_single(struct device *dev, void *ptr,
-				       size_t size,
-				       enum dma_data_direction direction,
-				       struct dma_attrs *attrs)
+static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
+				     unsigned long offset, size_t size,
+				     enum dma_data_direction direction,
+				     struct dma_attrs *attrs)
 {
 	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
-		return dma_direct_ops.map_single(dev, ptr, size, direction,
-						 attrs);
+		return dma_direct_ops.map_page(dev, page, offset, size,
+					       direction, attrs);
 	else
-		return iommu_map_single(dev, cell_get_iommu_table(dev), ptr,
-					size, device_to_mask(dev), direction,
-					attrs);
+		return iommu_map_page(dev, cell_get_iommu_table(dev), page,
+				      offset, size, device_to_mask(dev),
+				      direction, attrs);
 }
 
-static void dma_fixed_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				   size_t size,
-				   enum dma_data_direction direction,
-				   struct dma_attrs *attrs)
+static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
+				 size_t size, enum dma_data_direction direction,
+				 struct dma_attrs *attrs)
 {
 	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
-		dma_direct_ops.unmap_single(dev, dma_addr, size, direction,
-					    attrs);
+		dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
+					  attrs);
 	else
-		iommu_unmap_single(cell_get_iommu_table(dev), dma_addr, size,
-				   direction, attrs);
+		iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
+				 direction, attrs);
 }
 
 static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
@@ -652,12 +651,12 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
 struct dma_mapping_ops dma_iommu_fixed_ops = {
 	.alloc_coherent = dma_fixed_alloc_coherent,
 	.free_coherent  = dma_fixed_free_coherent,
-	.map_single     = dma_fixed_map_single,
-	.unmap_single   = dma_fixed_unmap_single,
 	.map_sg         = dma_fixed_map_sg,
 	.unmap_sg       = dma_fixed_unmap_sg,
 	.dma_supported  = dma_fixed_dma_supported,
 	.set_dma_mask   = dma_set_mask_and_switch,
+	.map_page       = dma_fixed_map_page,
+	.unmap_page     = dma_fixed_unmap_page,
 };
 
 static void cell_dma_dev_setup_fixed(struct device *dev);