diff options
author | Mark Nelson <markn@au1.ibm.com> | 2008-10-27 16:38:08 -0400 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2008-10-31 01:13:48 -0400 |
commit | f9226d572d2f8b5f564596db8c6a13e458c46191 (patch) | |
tree | bff17e54e92bfeea78f8a86181963db7de9a5549 /arch/powerpc/kernel | |
parent | b30115ea8f685bcd1769553fe8511745f985053c (diff) |
powerpc: Update remaining dma_mapping_ops to use map/unmap_page
After the merge of the 32 and 64bit DMA code, dma_direct_ops lost
their map/unmap_single() functions but gained map/unmap_page(). This
caused a problem for Cell because Cell's dma_iommu_fixed_ops called
the dma_direct_ops if the fixed linear mapping was to be used or the
iommu ops if the dynamic window was to be used. So in order to fix
this problem we need to update the 64bit DMA code to use
map/unmap_page.
First, we update the generic IOMMU code so that iommu_map_single()
becomes iommu_map_page() and iommu_unmap_single() becomes
iommu_unmap_page(). Then we propagate these changes up through all
the callers of these two functions and in the process update all the
dma_mapping_ops so that they have map/unmap_page rather than
map/unmap_single. We can do this because on 64bit there is no HIGHMEM
memory so map/unmap_page ends up performing exactly the same function
as map/unmap_single, just taking different arguments.
This has no effect on drivers because the dma_map_single_attrs() just
ends up calling the map_page() function of the appropriate
dma_mapping_ops and similarly the dma_unmap_single_attrs() calls
unmap_page().
This fixes an oops on Cell blades, which oops on boot without this
because they call dma_direct_ops.map_single, which is NULL.
Signed-off-by: Mark Nelson <markn@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r-- | arch/powerpc/kernel/dma-iommu.c | 34 | ||||
-rw-r--r-- | arch/powerpc/kernel/ibmebus.c | 27 | ||||
-rw-r--r-- | arch/powerpc/kernel/iommu.c | 22 | ||||
-rw-r--r-- | arch/powerpc/kernel/vio.c | 25 |
4 files changed, 55 insertions, 53 deletions
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 49248f89ce23..14183af1b3fb 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c | |||
@@ -30,28 +30,26 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size, | |||
30 | } | 30 | } |
31 | 31 | ||
32 | /* Creates TCEs for a user provided buffer. The user buffer must be | 32 | /* Creates TCEs for a user provided buffer. The user buffer must be |
33 | * contiguous real kernel storage (not vmalloc). The address of the buffer | 33 | * contiguous real kernel storage (not vmalloc). The address passed here |
34 | * passed here is the kernel (virtual) address of the buffer. The buffer | 34 | * comprises a page address and offset into that page. The dma_addr_t |
35 | * need not be page aligned, the dma_addr_t returned will point to the same | 35 | * returned will point to the same byte within the page as was passed in. |
36 | * byte within the page as vaddr. | ||
37 | */ | 36 | */ |
38 | static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr, | 37 | static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, |
39 | size_t size, | 38 | unsigned long offset, size_t size, |
40 | enum dma_data_direction direction, | 39 | enum dma_data_direction direction, |
41 | struct dma_attrs *attrs) | 40 | struct dma_attrs *attrs) |
42 | { | 41 | { |
43 | return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size, | 42 | return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size, |
44 | device_to_mask(dev), direction, attrs); | 43 | device_to_mask(dev), direction, attrs); |
45 | } | 44 | } |
46 | 45 | ||
47 | 46 | ||
48 | static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle, | 47 | static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, |
49 | size_t size, | 48 | size_t size, enum dma_data_direction direction, |
50 | enum dma_data_direction direction, | 49 | struct dma_attrs *attrs) |
51 | struct dma_attrs *attrs) | ||
52 | { | 50 | { |
53 | iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction, | 51 | iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction, |
54 | attrs); | 52 | attrs); |
55 | } | 53 | } |
56 | 54 | ||
57 | 55 | ||
@@ -94,10 +92,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask) | |||
94 | struct dma_mapping_ops dma_iommu_ops = { | 92 | struct dma_mapping_ops dma_iommu_ops = { |
95 | .alloc_coherent = dma_iommu_alloc_coherent, | 93 | .alloc_coherent = dma_iommu_alloc_coherent, |
96 | .free_coherent = dma_iommu_free_coherent, | 94 | .free_coherent = dma_iommu_free_coherent, |
97 | .map_single = dma_iommu_map_single, | ||
98 | .unmap_single = dma_iommu_unmap_single, | ||
99 | .map_sg = dma_iommu_map_sg, | 95 | .map_sg = dma_iommu_map_sg, |
100 | .unmap_sg = dma_iommu_unmap_sg, | 96 | .unmap_sg = dma_iommu_unmap_sg, |
101 | .dma_supported = dma_iommu_dma_supported, | 97 | .dma_supported = dma_iommu_dma_supported, |
98 | .map_page = dma_iommu_map_page, | ||
99 | .unmap_page = dma_iommu_unmap_page, | ||
102 | }; | 100 | }; |
103 | EXPORT_SYMBOL(dma_iommu_ops); | 101 | EXPORT_SYMBOL(dma_iommu_ops); |
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index a06362223f8d..64299d28f364 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c | |||
@@ -79,20 +79,21 @@ static void ibmebus_free_coherent(struct device *dev, | |||
79 | kfree(vaddr); | 79 | kfree(vaddr); |
80 | } | 80 | } |
81 | 81 | ||
82 | static dma_addr_t ibmebus_map_single(struct device *dev, | 82 | static dma_addr_t ibmebus_map_page(struct device *dev, |
83 | void *ptr, | 83 | struct page *page, |
84 | size_t size, | 84 | unsigned long offset, |
85 | enum dma_data_direction direction, | 85 | size_t size, |
86 | struct dma_attrs *attrs) | 86 | enum dma_data_direction direction, |
87 | struct dma_attrs *attrs) | ||
87 | { | 88 | { |
88 | return (dma_addr_t)(ptr); | 89 | return (dma_addr_t)(page_address(page) + offset); |
89 | } | 90 | } |
90 | 91 | ||
91 | static void ibmebus_unmap_single(struct device *dev, | 92 | static void ibmebus_unmap_page(struct device *dev, |
92 | dma_addr_t dma_addr, | 93 | dma_addr_t dma_addr, |
93 | size_t size, | 94 | size_t size, |
94 | enum dma_data_direction direction, | 95 | enum dma_data_direction direction, |
95 | struct dma_attrs *attrs) | 96 | struct dma_attrs *attrs) |
96 | { | 97 | { |
97 | return; | 98 | return; |
98 | } | 99 | } |
@@ -129,11 +130,11 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask) | |||
129 | static struct dma_mapping_ops ibmebus_dma_ops = { | 130 | static struct dma_mapping_ops ibmebus_dma_ops = { |
130 | .alloc_coherent = ibmebus_alloc_coherent, | 131 | .alloc_coherent = ibmebus_alloc_coherent, |
131 | .free_coherent = ibmebus_free_coherent, | 132 | .free_coherent = ibmebus_free_coherent, |
132 | .map_single = ibmebus_map_single, | ||
133 | .unmap_single = ibmebus_unmap_single, | ||
134 | .map_sg = ibmebus_map_sg, | 133 | .map_sg = ibmebus_map_sg, |
135 | .unmap_sg = ibmebus_unmap_sg, | 134 | .unmap_sg = ibmebus_unmap_sg, |
136 | .dma_supported = ibmebus_dma_supported, | 135 | .dma_supported = ibmebus_dma_supported, |
136 | .map_page = ibmebus_map_page, | ||
137 | .unmap_page = ibmebus_unmap_page, | ||
137 | }; | 138 | }; |
138 | 139 | ||
139 | static int ibmebus_match_path(struct device *dev, void *data) | 140 | static int ibmebus_match_path(struct device *dev, void *data) |
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 45f47c97fd14..1bfa706b96e7 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -565,21 +565,23 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name) | |||
565 | } | 565 | } |
566 | 566 | ||
567 | /* Creates TCEs for a user provided buffer. The user buffer must be | 567 | /* Creates TCEs for a user provided buffer. The user buffer must be |
568 | * contiguous real kernel storage (not vmalloc). The address of the buffer | 568 | * contiguous real kernel storage (not vmalloc). The address passed here |
569 | * passed here is the kernel (virtual) address of the buffer. The buffer | 569 | * comprises a page address and offset into that page. The dma_addr_t |
570 | * need not be page aligned, the dma_addr_t returned will point to the same | 570 | * returned will point to the same byte within the page as was passed in. |
571 | * byte within the page as vaddr. | ||
572 | */ | 571 | */ |
573 | dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl, | 572 | dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, |
574 | void *vaddr, size_t size, unsigned long mask, | 573 | struct page *page, unsigned long offset, size_t size, |
575 | enum dma_data_direction direction, struct dma_attrs *attrs) | 574 | unsigned long mask, enum dma_data_direction direction, |
575 | struct dma_attrs *attrs) | ||
576 | { | 576 | { |
577 | dma_addr_t dma_handle = DMA_ERROR_CODE; | 577 | dma_addr_t dma_handle = DMA_ERROR_CODE; |
578 | void *vaddr; | ||
578 | unsigned long uaddr; | 579 | unsigned long uaddr; |
579 | unsigned int npages, align; | 580 | unsigned int npages, align; |
580 | 581 | ||
581 | BUG_ON(direction == DMA_NONE); | 582 | BUG_ON(direction == DMA_NONE); |
582 | 583 | ||
584 | vaddr = page_address(page) + offset; | ||
583 | uaddr = (unsigned long)vaddr; | 585 | uaddr = (unsigned long)vaddr; |
584 | npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE); | 586 | npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE); |
585 | 587 | ||
@@ -605,9 +607,9 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl, | |||
605 | return dma_handle; | 607 | return dma_handle; |
606 | } | 608 | } |
607 | 609 | ||
608 | void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, | 610 | void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, |
609 | size_t size, enum dma_data_direction direction, | 611 | size_t size, enum dma_data_direction direction, |
610 | struct dma_attrs *attrs) | 612 | struct dma_attrs *attrs) |
611 | { | 613 | { |
612 | unsigned int npages; | 614 | unsigned int npages; |
613 | 615 | ||
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 434c92a85c03..a11e6bc59b30 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
@@ -516,10 +516,10 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, | |||
516 | vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); | 516 | vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); |
517 | } | 517 | } |
518 | 518 | ||
519 | static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr, | 519 | static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, |
520 | size_t size, | 520 | unsigned long offset, size_t size, |
521 | enum dma_data_direction direction, | 521 | enum dma_data_direction direction, |
522 | struct dma_attrs *attrs) | 522 | struct dma_attrs *attrs) |
523 | { | 523 | { |
524 | struct vio_dev *viodev = to_vio_dev(dev); | 524 | struct vio_dev *viodev = to_vio_dev(dev); |
525 | dma_addr_t ret = DMA_ERROR_CODE; | 525 | dma_addr_t ret = DMA_ERROR_CODE; |
@@ -529,7 +529,7 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr, | |||
529 | return ret; | 529 | return ret; |
530 | } | 530 | } |
531 | 531 | ||
532 | ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs); | 532 | ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs); |
533 | if (unlikely(dma_mapping_error(dev, ret))) { | 533 | if (unlikely(dma_mapping_error(dev, ret))) { |
534 | vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); | 534 | vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); |
535 | atomic_inc(&viodev->cmo.allocs_failed); | 535 | atomic_inc(&viodev->cmo.allocs_failed); |
@@ -538,14 +538,14 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr, | |||
538 | return ret; | 538 | return ret; |
539 | } | 539 | } |
540 | 540 | ||
541 | static void vio_dma_iommu_unmap_single(struct device *dev, | 541 | static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, |
542 | dma_addr_t dma_handle, size_t size, | 542 | size_t size, |
543 | enum dma_data_direction direction, | 543 | enum dma_data_direction direction, |
544 | struct dma_attrs *attrs) | 544 | struct dma_attrs *attrs) |
545 | { | 545 | { |
546 | struct vio_dev *viodev = to_vio_dev(dev); | 546 | struct vio_dev *viodev = to_vio_dev(dev); |
547 | 547 | ||
548 | dma_iommu_ops.unmap_single(dev, dma_handle, size, direction, attrs); | 548 | dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs); |
549 | 549 | ||
550 | vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); | 550 | vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); |
551 | } | 551 | } |
@@ -603,10 +603,11 @@ static void vio_dma_iommu_unmap_sg(struct device *dev, | |||
603 | struct dma_mapping_ops vio_dma_mapping_ops = { | 603 | struct dma_mapping_ops vio_dma_mapping_ops = { |
604 | .alloc_coherent = vio_dma_iommu_alloc_coherent, | 604 | .alloc_coherent = vio_dma_iommu_alloc_coherent, |
605 | .free_coherent = vio_dma_iommu_free_coherent, | 605 | .free_coherent = vio_dma_iommu_free_coherent, |
606 | .map_single = vio_dma_iommu_map_single, | ||
607 | .unmap_single = vio_dma_iommu_unmap_single, | ||
608 | .map_sg = vio_dma_iommu_map_sg, | 606 | .map_sg = vio_dma_iommu_map_sg, |
609 | .unmap_sg = vio_dma_iommu_unmap_sg, | 607 | .unmap_sg = vio_dma_iommu_unmap_sg, |
608 | .map_page = vio_dma_iommu_map_page, | ||
609 | .unmap_page = vio_dma_iommu_unmap_page, | ||
610 | |||
610 | }; | 611 | }; |
611 | 612 | ||
612 | /** | 613 | /** |