author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>  2009-05-14 12:23:10 -0400
committer  David S. Miller <davem@davemloft.net>            2009-06-16 07:56:45 -0400
commit     797a75686528e9f6f9bfee2a719a00b47868c999
tree       98f5aadddd6b373e69719b95bb2fd68fbe1fe29f
parent     2e8fcfe2e51b2ee0fef3313e3c9ec25b9d6ed255
sparc: use dma_map_page instead of dma_map_single
This patch converts dma_map_single and dma_unmap_single to use map_page and
unmap_page respectively, and removes the now-unnecessary map_single and
unmap_single operations. map_page can be used to implement map_single, but the
opposite is impossible, so having only map_page in struct dma_ops is enough.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Tested-by: Robert Reif <reif@earthlink.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc/include/asm/dma-mapping_64.h  | 23
-rw-r--r--  arch/sparc/kernel/iommu.c                | 15
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c            | 15
3 files changed, 28 insertions(+), 25 deletions(-)
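A minimal sketch of the point made in the commit message, reusing the dma_ops table and prototypes from the patched dma-mapping_64.h shown below (not part of the commit itself): a dma_map_single() call can always be re-expressed through map_page, because a kernel linear-map address can be split into its backing struct page and an in-page offset with virt_to_page(); the reverse does not hold in general, since a struct page handed to map_page need not have a kernel virtual address at all (e.g. highmem pages on other architectures).

/*
 * Illustration only; it mirrors the wrapper the first dma-mapping_64.h
 * hunk below actually installs.
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	/* Recover the backing page and the offset within that page. */
	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
				 direction);
}

With this wrapper in place, dma_map_page() itself collapses to a direct dma_ops->map_page() call, so the map_single/unmap_single members can be dropped from struct dma_ops entirely, as the diff below does.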
diff --git a/arch/sparc/include/asm/dma-mapping_64.h b/arch/sparc/include/asm/dma-mapping_64.h
index 0c4fb1f81caf..579757e00a25 100644
--- a/arch/sparc/include/asm/dma-mapping_64.h
+++ b/arch/sparc/include/asm/dma-mapping_64.h
@@ -9,12 +9,12 @@ struct dma_ops {
 			    dma_addr_t *dma_handle, gfp_t flag);
 	void (*free_coherent)(struct device *dev, size_t size,
 			      void *cpu_addr, dma_addr_t dma_handle);
-	dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
-				 size_t size,
-				 enum dma_data_direction direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-			     size_t size,
-			     enum dma_data_direction direction);
+	dma_addr_t (*map_page)(struct device *dev, struct page *page,
+			       unsigned long offset, size_t size,
+			       enum dma_data_direction direction);
+	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
+			   size_t size,
+			   enum dma_data_direction direction);
 	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
 		      enum dma_data_direction direction);
 	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
@@ -51,29 +51,30 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 					size_t size,
 					enum dma_data_direction direction)
 {
-	return dma_ops->map_single(dev, cpu_addr, size, direction);
+	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
+				 direction);
 }
 
 static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
 				    size_t size,
 				    enum dma_data_direction direction)
 {
-	dma_ops->unmap_single(dev, dma_addr, size, direction);
+	dma_ops->unmap_page(dev, dma_addr, size, direction);
 }
 
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      unsigned long offset, size_t size,
 				      enum dma_data_direction direction)
 {
-	return dma_ops->map_single(dev, page_address(page) + offset,
-				   size, direction);
+	return dma_ops->map_page(dev, page, offset, size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 				  size_t size,
 				  enum dma_data_direction direction)
 {
-	dma_ops->unmap_single(dev, dma_address, size, direction);
+	dma_ops->unmap_page(dev, dma_address, size, direction);
 }
 
 static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index d8900e1d5aad..0aeaefe696b9 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -351,8 +351,9 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 	free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
-				    enum dma_data_direction direction)
+static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t sz,
+				  enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -368,7 +369,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
 	if (unlikely(direction == DMA_NONE))
 		goto bad_no_ctx;
 
-	oaddr = (unsigned long)ptr;
+	oaddr = (unsigned long)(page_address(page) + offset);
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
@@ -472,8 +473,8 @@ do_flush_sync:
 			       vaddr, ctx, npages);
 }
 
-static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
-				size_t sz, enum dma_data_direction direction)
+static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
+			      size_t sz, enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -824,8 +825,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 static const struct dma_ops sun4u_dma_ops = {
 	.alloc_coherent		= dma_4u_alloc_coherent,
 	.free_coherent		= dma_4u_free_coherent,
-	.map_single		= dma_4u_map_single,
-	.unmap_single		= dma_4u_unmap_single,
+	.map_page		= dma_4u_map_page,
+	.unmap_page		= dma_4u_unmap_page,
 	.map_sg			= dma_4u_map_sg,
 	.unmap_sg		= dma_4u_unmap_sg,
 	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 5db5ebed35da..2485eaa23101 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -230,8 +230,9 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
-				    enum dma_data_direction direction)
+static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t sz,
+				  enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	unsigned long flags, npages, oaddr;
@@ -245,7 +246,7 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
 	if (unlikely(direction == DMA_NONE))
 		goto bad;
 
-	oaddr = (unsigned long)ptr;
+	oaddr = (unsigned long)(page_address(page) + offset);
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
@@ -294,8 +295,8 @@ iommu_map_fail:
 	return DMA_ERROR_CODE;
 }
 
-static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
-				size_t sz, enum dma_data_direction direction)
+static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
+			      size_t sz, enum dma_data_direction direction)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
@@ -537,8 +538,8 @@ static void dma_4v_sync_sg_for_cpu(struct device *dev,
 static const struct dma_ops sun4v_dma_ops = {
 	.alloc_coherent			= dma_4v_alloc_coherent,
 	.free_coherent			= dma_4v_free_coherent,
-	.map_single			= dma_4v_map_single,
-	.unmap_single			= dma_4v_unmap_single,
+	.map_page			= dma_4v_map_page,
+	.unmap_page			= dma_4v_unmap_page,
 	.map_sg				= dma_4v_map_sg,
 	.unmap_sg			= dma_4v_unmap_sg,
 	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,