diff options

 arch/powerpc/kernel/Makefile    |   2 +-
 arch/powerpc/kernel/dma-iommu.c | 103 ++++++++++++++++++++++
 arch/powerpc/kernel/dma.c       |  98 --------------------
 3 files changed, 105 insertions(+), 98 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ab7d4cbf1e00..09b3cabf2f91 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -73,7 +73,7 @@ obj-y				+= time.o prom.o traps.o setup-common.o \
 				   udbg.o misc.o io.o \
 				   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
-obj-$(CONFIG_PPC64)		+= dma.o iommu.o
+obj-$(CONFIG_PPC64)		+= dma.o dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_PPC_MULTIPLATFORM)	+= prom_init.o
 obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
new file mode 100644
index 000000000000..01091f1d508c
--- /dev/null
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * Provide default implementations of the DMA mapping callbacks for
+ * busses using the iommu infrastructure
+ */
+
+#include <asm/iommu.h>
+
+/*
+ * Generic iommu implementation
+ */
+
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
+				      dma_addr_t *dma_handle, gfp_t flag)
+{
+	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+				    dma_handle, device_to_mask(dev), flag,
+				    dev->archdata.numa_node);
+}
+
+static void dma_iommu_free_coherent(struct device *dev, size_t size,
+				    void *vaddr, dma_addr_t dma_handle)
+{
+	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
+}
+
+/* Creates TCEs for a user provided buffer.  The user buffer must be
+ * contiguous real kernel storage (not vmalloc).  The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer.  The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
+				       size_t size,
+				       enum dma_data_direction direction,
+				       struct dma_attrs *attrs)
+{
+	return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
+				device_to_mask(dev), direction, attrs);
+}
+
+
+static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
+				   size_t size,
+				   enum dma_data_direction direction,
+				   struct dma_attrs *attrs)
+{
+	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction,
+			   attrs);
+}
+
+
+static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
+			    int nelems, enum dma_data_direction direction,
+			    struct dma_attrs *attrs)
+{
+	return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
+			    device_to_mask(dev), direction, attrs);
+}
+
+static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
+			       int nelems, enum dma_data_direction direction,
+			       struct dma_attrs *attrs)
+{
+	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
+		       attrs);
+}
+
+/* We support DMA to/from any memory page via the iommu */
+static int dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	struct iommu_table *tbl = dev->archdata.dma_data;
+
+	if (!tbl || tbl->it_offset > mask) {
+		printk(KERN_INFO
+		       "Warning: IOMMU offset too big for device mask\n");
+		if (tbl)
+			printk(KERN_INFO
+			       "mask: 0x%08lx, table offset: 0x%08lx\n",
+			       mask, tbl->it_offset);
+		else
+			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+			       mask);
+		return 0;
+	} else
+		return 1;
+}
+
+struct dma_mapping_ops dma_iommu_ops = {
+	.alloc_coherent	= dma_iommu_alloc_coherent,
+	.free_coherent	= dma_iommu_free_coherent,
+	.map_single	= dma_iommu_map_single,
+	.unmap_single	= dma_iommu_unmap_single,
+	.map_sg		= dma_iommu_map_sg,
+	.unmap_sg	= dma_iommu_unmap_sg,
+	.dma_supported	= dma_iommu_dma_supported,
+};
+EXPORT_SYMBOL(dma_iommu_ops);
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index ae5708e3a312..44e3486419e1 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -2,111 +2,15 @@
  * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
  *
  * Provide default implementations of the DMA mapping callbacks for
- * directly mapped busses and busses using the iommu infrastructure
+ * directly mapped busses.
  */
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <asm/bug.h>
-#include <asm/iommu.h>
 #include <asm/abs_addr.h>
 
 /*
- * Generic iommu implementation
- */
-
-/* Allocates a contiguous real buffer and creates mappings over it.
- * Returns the virtual address of the buffer and sets dma_handle
- * to the dma address (mapping) of the first page.
- */
-static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_handle, gfp_t flag)
-{
-	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
-				    dma_handle, device_to_mask(dev), flag,
-				    dev->archdata.numa_node);
-}
-
-static void dma_iommu_free_coherent(struct device *dev, size_t size,
-				    void *vaddr, dma_addr_t dma_handle)
-{
-	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
-}
-
-/* Creates TCEs for a user provided buffer.  The user buffer must be
- * contiguous real kernel storage (not vmalloc).  The address of the buffer
- * passed here is the kernel (virtual) address of the buffer.  The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
- */
-static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
-				       size_t size,
-				       enum dma_data_direction direction,
-				       struct dma_attrs *attrs)
-{
-	return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
-				device_to_mask(dev), direction, attrs);
-}
-
-
-static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
-				   size_t size,
-				   enum dma_data_direction direction,
-				   struct dma_attrs *attrs)
-{
-	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction,
-			   attrs);
-}
-
-
-static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
-			    int nelems, enum dma_data_direction direction,
-			    struct dma_attrs *attrs)
-{
-	return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
-			    device_to_mask(dev), direction, attrs);
-}
-
-static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
-			       int nelems, enum dma_data_direction direction,
-			       struct dma_attrs *attrs)
-{
-	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
-		       attrs);
-}
-
-/* We support DMA to/from any memory page via the iommu */
-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
-{
-	struct iommu_table *tbl = dev->archdata.dma_data;
-
-	if (!tbl || tbl->it_offset > mask) {
-		printk(KERN_INFO
-		       "Warning: IOMMU offset too big for device mask\n");
-		if (tbl)
-			printk(KERN_INFO
-			       "mask: 0x%08lx, table offset: 0x%08lx\n",
-			       mask, tbl->it_offset);
-		else
-			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
-			       mask);
-		return 0;
-	} else
-		return 1;
-}
-
-struct dma_mapping_ops dma_iommu_ops = {
-	.alloc_coherent	= dma_iommu_alloc_coherent,
-	.free_coherent	= dma_iommu_free_coherent,
-	.map_single	= dma_iommu_map_single,
-	.unmap_single	= dma_iommu_unmap_single,
-	.map_sg		= dma_iommu_map_sg,
-	.unmap_sg	= dma_iommu_unmap_sg,
-	.dma_supported	= dma_iommu_dma_supported,
-};
-EXPORT_SYMBOL(dma_iommu_ops);
-
-/*
  * Generic direct DMA implementation
  *
  * This implementation supports a per-device offset that can be applied if