author		Paul Mundt <lethal@linux-sh.org>	2009-10-19 23:55:56 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-10-19 23:55:56 -0400
commit		73c926bee0e4b7739bbb992a0a3df561178dd522 (patch)
tree		1b57464ef1a105911ddd9dab514e404fa2aa7cb2 /arch/sh
parent		14c011deb4cb906d72b6b2b6880e21c3cc110fcc (diff)
sh: Convert to asm-generic/dma-mapping-common.h
This converts the old DMA mapping support to the new generic
dma-mapping-common.h abstraction.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
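
[Editorial context, not part of the commit message: the generic layer adopted here routes every dma_map_*/dma_sync_* call through the struct dma_map_ops returned by the architecture's get_dma_ops(). The following is a rough, trimmed-down sketch of the kind of wrapper asm-generic/dma-mapping-common.h provided in this era, for illustration only; it is not the verbatim header.]

	/* Illustrative sketch: the generic wrapper dispatches through the
	 * architecture-provided dma_map_ops instead of open-coding the
	 * cache maintenance in each arch header. */
	static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
						      size_t size,
						      enum dma_data_direction dir,
						      struct dma_attrs *attrs)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);
		dma_addr_t addr;

		addr = ops->map_page(dev, virt_to_page(ptr),
				     (unsigned long)ptr & ~PAGE_MASK, size,
				     dir, attrs);
		debug_dma_map_page(dev, virt_to_page(ptr),
				   (unsigned long)ptr & ~PAGE_MASK, size,
				   dir, addr, true);

		return addr;
	}

With wrappers of that shape in the common header, the per-architecture header only needs to supply get_dma_ops() and an ops structure, which is what the new arch/sh/kernel/dma-nommu.c below provides for the IOMMU-less case.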
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/Kconfig				  1
-rw-r--r--	arch/sh/include/asm/dma-mapping.h	200
-rw-r--r--	arch/sh/include/asm/pci.h		 10
-rw-r--r--	arch/sh/kernel/Makefile			  3
-rw-r--r--	arch/sh/kernel/dma-nommu.c		 76
-rw-r--r--	arch/sh/mm/consistent.c			  6
6 files changed, 112 insertions, 184 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 2e8589a6fd2f..2d3a69993858 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -16,6 +16,7 @@ config SUPERH
 	select HAVE_IOREMAP_PROT if MMU
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
+	select HAVE_DMA_ATTRS
 	select HAVE_PERF_EVENTS
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 69d56dd4c968..b9a8f18f35a2 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -1,21 +1,32 @@
 #ifndef __ASM_SH_DMA_MAPPING_H
 #define __ASM_SH_DMA_MAPPING_H
 
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <asm/cacheflush.h>
-#include <asm/io.h>
-#include <asm-generic/dma-coherent.h>
+extern struct dma_map_ops *dma_ops;
+extern void no_iommu_init(void);
 
-extern struct bus_type pci_bus_type;
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	return dma_ops;
+}
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-#define dma_supported(dev, mask)	(1)
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
+
+	return 1;
+}
 
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
 	if (!dev->dma_mask || !dma_supported(dev, mask))
 		return -EIO;
+	if (ops->set_dma_mask)
+		return ops->set_dma_mask(dev, mask);
 
 	*dev->dma_mask = mask;
 
@@ -35,160 +46,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h) (1)
 
-static inline dma_addr_t dma_map_single(struct device *dev,
-					void *ptr, size_t size,
-					enum dma_data_direction dir)
-{
-	dma_addr_t addr = virt_to_phys(ptr);
-
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-	if (dev->bus == &pci_bus_type)
-		return addr;
-#endif
-	dma_cache_sync(dev, ptr, size, dir);
-
-	debug_dma_map_page(dev, virt_to_page(ptr),
-			   (unsigned long)ptr & ~PAGE_MASK, size,
-			   dir, addr, true);
-
-	return addr;
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
-				    size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++) {
-#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
-#endif
-		sg[i].dma_address = sg_phys(&sg[i]);
-		sg[i].dma_length = sg[i].length;
-	}
-
-	debug_dma_map_sg(dev, sg, nents, i, dir);
-
-	return nents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
-{
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction dir)
-{
-	return dma_map_single(dev, page_address(page) + offset, size, dir);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size, enum dma_data_direction dir)
-{
-	dma_unmap_single(dev, dma_address, size, dir);
-}
-
-static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle,
-				     size_t size, enum dma_data_direction dir)
-{
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-	if (dev->bus == &pci_bus_type)
-		return;
-#endif
-	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
-}
-
-static inline void dma_sync_single_range(struct device *dev,
-					 dma_addr_t dma_handle,
-					 unsigned long offset, size_t size,
-					 enum dma_data_direction dir)
-{
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-	if (dev->bus == &pci_bus_type)
-		return;
-#endif
-	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
-}
-
-static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg,
-				 int nelems, enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nelems; i++) {
-#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
-#endif
-		sg[i].dma_address = sg_phys(&sg[i]);
-		sg[i].dma_length = sg[i].length;
-	}
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-					   dma_addr_t dma_handle, size_t size,
-					   enum dma_data_direction dir)
-{
-	__dma_sync_single(dev, dma_handle, size, dir);
-	debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t dma_handle,
-					      size_t size,
-					      enum dma_data_direction dir)
-{
-	__dma_sync_single(dev, dma_handle, size, dir);
-	debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t dma_handle,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction direction)
-{
-	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-	debug_dma_sync_single_range_for_cpu(dev, dma_handle,
-					    offset, size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-						     dma_addr_t dma_handle,
-						     unsigned long offset,
-						     size_t size,
-						     enum dma_data_direction direction)
-{
-	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-	debug_dma_sync_single_range_for_device(dev, dma_handle,
-					       offset, size, direction);
-}
-
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-				       struct scatterlist *sg, int nelems,
-				       enum dma_data_direction dir)
-{
-	__dma_sync_sg(dev, sg, nelems, dir);
-	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
-					  struct scatterlist *sg, int nelems,
-					  enum dma_data_direction dir)
-{
-	__dma_sync_sg(dev, sg, nelems, dir);
-	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-}
-
 static inline int dma_get_cache_alignment(void)
 {
 	/*
@@ -200,20 +57,15 @@ static inline int dma_get_cache_alignment(void)
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return dma_addr == 0;
-}
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
 
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-			    dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
+	return dma_addr == 0;
+}
 
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
-				  dma_addr_t device_addr, size_t size);
+#include <asm-generic/dma-coherent.h>
+#include <asm-generic/dma-mapping-common.h>
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 4163950cd1c6..6bf276b4f85d 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -3,8 +3,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/dma-mapping.h>
-
 /* Can be used to override the logic in pci_scan_bus for skipping
    already-configured bus numbers - to be used for buggy BIOSes
    or architectures with incomplete PCI setup by the loader */
@@ -54,13 +52,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
  * address space. The networking and block device layers use
  * this boolean for bounce buffer decisions.
  */
-#define PCI_DMA_BUS_IS_PHYS	(1)
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/scatterlist.h>
-#include <linux/string.h>
-#include <asm/io.h>
+#define PCI_DMA_BUS_IS_PHYS	(dma_ops->is_phys)
 
 /* pci_unmap_{single,page} being a nop depends upon the
  * configuration.
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 6fe0fcdaf531..097ae5ceb0e3 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -11,7 +11,8 @@ endif
 
 CFLAGS_REMOVE_return_address.o = -pg
 
-obj-y	:= debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o	\
+obj-y	:= debugtraps.o dma-nommu.o dumpstack.o				\
+	   idle.o io.o io_generic.o irq.o				\
 	   irq_$(BITS).o machvec.o nmi_debug.o process_$(BITS).o	\
 	   ptrace_$(BITS).o return_address.o				\
 	   setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o		\
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
new file mode 100644
index 000000000000..e88fcebf860c
--- /dev/null
+++ b/arch/sh/kernel/dma-nommu.c
@@ -0,0 +1,76 @@
+/*
+ * DMA mapping support for platforms lacking IOMMUs.
+ *
+ * Copyright (C) 2009  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
+{
+	dma_addr_t addr = page_to_phys(page) + offset;
+
+	WARN_ON(size == 0);
+	dma_cache_sync(dev, page_address(page) + offset, size, dir);
+
+	return addr;
+}
+
+static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	WARN_ON(nents == 0 || sg[0].length == 0);
+
+	for_each_sg(sg, s, nents, i) {
+		BUG_ON(!sg_page(s));
+
+		dma_cache_sync(dev, sg_virt(s), s->length, dir);
+
+		s->dma_address = sg_phys(s);
+		s->dma_length = s->length;
+	}
+
+	return nents;
+}
+
+static void nommu_sync_single(struct device *dev, dma_addr_t addr,
+			      size_t size, enum dma_data_direction dir)
+{
+	dma_cache_sync(dev, phys_to_virt(addr), size, dir);
+}
+
+static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
+			  int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nelems, i)
+		dma_cache_sync(dev, sg_virt(s), s->length, dir);
+}
+
+struct dma_map_ops nommu_dma_ops = {
+	.map_page		= nommu_map_page,
+	.map_sg			= nommu_map_sg,
+	.sync_single_for_device	= nommu_sync_single,
+	.sync_sg_for_device	= nommu_sync_sg,
+	.is_phys		= 1,
+};
+
+void __init no_iommu_init(void)
+{
+	if (dma_ops)
+		return;
+	dma_ops = &nommu_dma_ops;
+}
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 9a8403d9344b..1165161e472c 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -15,14 +15,20 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/io.h>
+#include <linux/module.h>
 #include <asm/cacheflush.h>
 #include <asm/addrspace.h>
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
 
+struct dma_map_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
 static int __init dma_init(void)
 {
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+	no_iommu_init();
 	return 0;
 }
 fs_initcall(dma_init);