author		Becky Bruce <becky.bruce@freescale.com>	2008-09-08 05:09:53 -0400
committer	Kumar Gala <galak@kernel.crashing.org>	2008-09-24 17:26:42 -0400
commit		8dd0e95206f7c33bed2aed33ac668335174684e8 (patch)
tree		fc338d97889c873bc6cf241da8469763f9d05629 /arch/powerpc/kernel/dma.c
parent		7c05d7e08d907d66b8e18515572f42c71fb709fe (diff)
powerpc: Move iommu dma ops from dma.c to dma-iommu.c
32-bit platforms are about to start using dma.c; move the iommu
dma ops into their own file to make this a bit cleaner.
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/dma.c')
-rw-r--r--	arch/powerpc/kernel/dma.c	98
1 file changed, 1 insertion(+), 97 deletions(-)
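The diffstat above is limited to dma.c, so only the removal side of the move appears on this page. For orientation, below is a hedged sketch of how platform code is expected to route a device through the relocated ops table. dma_iommu_ops, struct dma_mapping_ops, struct iommu_table, and dev->archdata.dma_data all appear in the diff that follows; the archdata.dma_ops field and the helper name are assumptions about the surrounding powerpc DMA glue, not something this commit shows.

/* Hypothetical wiring sketch, not taken from this commit. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/iommu.h>

/* After the move, this ops table is defined in arch/powerpc/kernel/dma-iommu.c. */
extern struct dma_mapping_ops dma_iommu_ops;

static void example_attach_iommu_ops(struct device *dev, struct iommu_table *tbl)
{
	/* The dma_iommu_* callbacks find their table via archdata.dma_data. */
	dev->archdata.dma_data = tbl;

	/* Assumed field: steer the generic DMA API for this device through
	 * the iommu-backed operations. */
	dev->archdata.dma_ops = &dma_iommu_ops;
}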
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index ae5708e3a312..44e3486419e1 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -2,111 +2,15 @@
  * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
  *
  * Provide default implementations of the DMA mapping callbacks for
- * directly mapped busses and busses using the iommu infrastructure
+ * directly mapped busses.
  */
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <asm/bug.h>
-#include <asm/iommu.h>
 #include <asm/abs_addr.h>
 
 /*
- * Generic iommu implementation
- */
-
-/* Allocates a contiguous real buffer and creates mappings over it.
- * Returns the virtual address of the buffer and sets dma_handle
- * to the dma address (mapping) of the first page.
- */
-static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_handle, gfp_t flag)
-{
-	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
-				    dma_handle, device_to_mask(dev), flag,
-				    dev->archdata.numa_node);
-}
-
-static void dma_iommu_free_coherent(struct device *dev, size_t size,
-				    void *vaddr, dma_addr_t dma_handle)
-{
-	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
-}
-
-/* Creates TCEs for a user provided buffer. The user buffer must be
- * contiguous real kernel storage (not vmalloc). The address of the buffer
- * passed here is the kernel (virtual) address of the buffer. The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
- */
-static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
-				       size_t size,
-				       enum dma_data_direction direction,
-				       struct dma_attrs *attrs)
-{
-	return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
-				device_to_mask(dev), direction, attrs);
-}
-
-
-static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
-				   size_t size,
-				   enum dma_data_direction direction,
-				   struct dma_attrs *attrs)
-{
-	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction,
-			   attrs);
-}
-
-
-static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
-			    int nelems, enum dma_data_direction direction,
-			    struct dma_attrs *attrs)
-{
-	return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
-			    device_to_mask(dev), direction, attrs);
-}
-
-static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
-			       int nelems, enum dma_data_direction direction,
-			       struct dma_attrs *attrs)
-{
-	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
-		       attrs);
-}
-
-/* We support DMA to/from any memory page via the iommu */
-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
-{
-	struct iommu_table *tbl = dev->archdata.dma_data;
-
-	if (!tbl || tbl->it_offset > mask) {
-		printk(KERN_INFO
-		       "Warning: IOMMU offset too big for device mask\n");
-		if (tbl)
-			printk(KERN_INFO
-			       "mask: 0x%08lx, table offset: 0x%08lx\n",
-			       mask, tbl->it_offset);
-		else
-			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
-			       mask);
-		return 0;
-	} else
-		return 1;
-}
-
-struct dma_mapping_ops dma_iommu_ops = {
-	.alloc_coherent = dma_iommu_alloc_coherent,
-	.free_coherent = dma_iommu_free_coherent,
-	.map_single = dma_iommu_map_single,
-	.unmap_single = dma_iommu_unmap_single,
-	.map_sg = dma_iommu_map_sg,
-	.unmap_sg = dma_iommu_unmap_sg,
-	.dma_supported = dma_iommu_dma_supported,
-};
-EXPORT_SYMBOL(dma_iommu_ops);
-
-/*
  * Generic direct DMA implementation
  *
  * This implementation supports a per-device offset that can be applied if
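The dma_iommu_dma_supported() callback removed above refuses DMA whenever the device's mask cannot cover the start of the iommu window (the !tbl || tbl->it_offset > mask test). A standalone illustration of that comparison, using a stand-in struct and invented example values rather than kernel code:

/* Userspace sketch of the mask-vs-offset check; fake_iommu_table stands in
 * for struct iommu_table and the numbers are made-up examples (units are
 * abstracted away for the illustration). */
#include <stdint.h>
#include <stdio.h>

struct fake_iommu_table {
	uint64_t it_offset;	/* start of the DMA window managed by the table */
};

static int fake_dma_supported(const struct fake_iommu_table *tbl, uint64_t mask)
{
	if (!tbl || tbl->it_offset > mask)
		return 0;	/* window starts beyond what the device can address */
	return 1;
}

int main(void)
{
	struct fake_iommu_table tbl = { .it_offset = 0x100000000ULL };

	/* A 32-bit mask cannot reach an offset at 4GB, so DMA is refused. */
	printf("32-bit mask supported: %d\n",
	       fake_dma_supported(&tbl, 0xffffffffULL));
	/* A full 64-bit mask covers the window, so DMA is allowed. */
	printf("64-bit mask supported: %d\n",
	       fake_dma_supported(&tbl, ~0ULL));
	return 0;
}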