author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2010-08-10 21:03:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-11 11:59:21 -0400
commit	4565f0170dfc849b3629c27d769db800467baa62 (patch)
tree	a2c70fb6f2f4826749b704ed83557623ca95bca5 /arch
parent	a6eb9fe105d5de0053b261148cee56c94b4720ca (diff)
dma-mapping: unify dma_get_cache_alignment implementations
dma_get_cache_alignment returns the minimum DMA alignment. Architectures define it as ARCH_DMA_MINALIGN (formerly ARCH_KMALLOC_MINALIGN), so we can unify the dma_get_cache_alignment implementations.

Note that some architectures implement dma_get_cache_alignment wrongly: dma_get_cache_alignment() should return the minimum DMA alignment, so fully coherent architectures should return 1. This patch also fixes that issue.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
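The per-architecture copies removed below are replaced by a single generic helper outside arch/ (in include/linux/dma-mapping.h), which this diffstat does not show because it is limited to 'arch'. A minimal sketch of what that common helper looks like, assuming it keys off ARCH_DMA_MINALIGN as the commit message describes:

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	/* Non-coherent architectures define ARCH_DMA_MINALIGN; use it. */
	return ARCH_DMA_MINALIGN;
#else
	/* Fully coherent architectures need no extra DMA alignment. */
	return 1;
#endif
}

Drivers keep calling dma_get_cache_alignment() as before, for example to round a buffer size up to a DMA-safe boundary.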
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/dma-mapping.h      |  1
-rw-r--r--  arch/arm/include/asm/dma-mapping.h        |  5
-rw-r--r--  arch/avr32/include/asm/dma-mapping.h      |  5
-rw-r--r--  arch/blackfin/include/asm/dma-mapping.h   |  1
-rw-r--r--  arch/cris/include/asm/dma-mapping.h       |  6
-rw-r--r--  arch/frv/include/asm/dma-mapping.h        |  6
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h       |  2
-rw-r--r--  arch/ia64/kernel/setup.c                  |  6
-rw-r--r--  arch/m68k/include/asm/dma-mapping.h       |  5
-rw-r--r--  arch/microblaze/include/asm/dma-mapping.h |  5
-rw-r--r--  arch/mips/include/asm/dma-mapping.h       |  7
-rw-r--r--  arch/mn10300/include/asm/dma-mapping.h    |  6
-rw-r--r--  arch/parisc/include/asm/dma-mapping.h     |  6
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h    | 15
-rw-r--r--  arch/sh/include/asm/dma-mapping.h         |  9
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h      |  9
-rw-r--r--  arch/tile/include/asm/dma-mapping.h       |  7
-rw-r--r--  arch/x86/include/asm/dma-mapping.h        |  7
-rw-r--r--  arch/xtensa/include/asm/dma-mapping.h     |  6
19 files changed, 0 insertions, 114 deletions
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 1bce8169733c..b3423d96acc7 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -44,6 +44,5 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 #define dma_is_consistent(d, h)	(1)
 
 #define dma_cache_sync(dev, va, size, dir)	((void)0)
-#define dma_get_cache_alignment()		L1_CACHE_BYTES
 
 #endif	/* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 69ce0727edb5..f4a996d5ae96 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -144,11 +144,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
-static inline int dma_get_cache_alignment(void)
-{
-	return 32;
-}
-
 static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
 {
 	return !!arch_is_coherent();
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index 0399359ab5d8..af6b81655074 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -341,9 +341,4 @@ static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 	return 1;
 }
 
-static inline int dma_get_cache_alignment(void)
-{
-	return boot_cpu_data.dcache.linesz;
-}
-
 #endif /* __ASM_AVR32_DMA_MAPPING_H */
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index 212cb80fd74b..6694a0f55de9 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -21,7 +21,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_supported(d, m) (1)
-#define dma_get_cache_alignment() (32)
 #define dma_is_consistent(d, h) (1)
 
 static inline int
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index da8ef8e8f842..fc30fd0b2a04 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -152,12 +152,6 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline int
-dma_get_cache_alignment(void)
-{
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
 #define dma_is_consistent(d, h) (1)
 
 static inline void
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
index 6af5d83e2fb2..7b05ce14177e 100644
--- a/arch/frv/include/asm/dma-mapping.h
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -125,12 +125,6 @@ int dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline
-int dma_get_cache_alignment(void)
-{
-	return 1 << L1_CACHE_SHIFT;
-}
-
 #define dma_is_consistent(d, h) (1)
 
 static inline
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 7d09a09cdaad..8d52deed3750 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -86,8 +86,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 }
 
-extern int dma_get_cache_alignment(void);
-
 static inline void
 dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction dir)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 41ae6a596b50..8fb958abf8d0 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -98,12 +98,6 @@ static struct resource bss_resource = {
 
 unsigned long ia64_max_cacheline_size;
 
-int dma_get_cache_alignment(void)
-{
-	return ia64_max_cacheline_size;
-}
-EXPORT_SYMBOL(dma_get_cache_alignment);
-
 unsigned long ia64_iobase;	/* virtual address for I/O accesses */
 EXPORT_SYMBOL(ia64_iobase);
 struct io_space io_space[MAX_IO_SPACES];
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
index 26f505488c11..a1ae732c7247 100644
--- a/arch/m68k/include/asm/dma-mapping.h
+++ b/arch/m68k/include/asm/dma-mapping.h
@@ -16,11 +16,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline int dma_get_cache_alignment(void)
-{
-	return 1 << L1_CACHE_SHIFT;
-}
-
 static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 507389580709..21df7cbae65e 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -132,11 +132,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	ops->free_coherent(dev, size, cpu_addr, dma_handle);
 }
 
-static inline int dma_get_cache_alignment(void)
-{
-	return L1_CACHE_BYTES;
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				enum dma_data_direction direction)
 {
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 664ba53dc32a..d724a15f0438 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -62,13 +62,6 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* XXX Largest on any MIPS */
-	return 128;
-}
-
 extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
index 4ed1522b38d2..8d452a657795 100644
--- a/arch/mn10300/include/asm/dma-mapping.h
+++ b/arch/mn10300/include/asm/dma-mapping.h
@@ -161,12 +161,6 @@ int dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline
-int dma_get_cache_alignment(void)
-{
-	return 1 << L1_CACHE_SHIFT;
-}
-
 #define dma_is_consistent(d)	(1)
 
 static inline
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index da6943380908..44d3f62ccf9d 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -185,12 +185,6 @@ dma_set_mask(struct device *dev, u64 mask)
 }
 
 static inline int
-dma_get_cache_alignment(void)
-{
-	return dcache_stride;
-}
-
-static inline int
 dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index c85ef230135b..a77ba280af04 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -215,21 +215,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_is_consistent(d, h)	(1)
 #endif
 
-static inline int dma_get_cache_alignment(void)
-{
-#ifdef CONFIG_PPC64
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-#else
-	/*
-	 * Each processor family will define its own L1_CACHE_SHIFT,
-	 * L1_CACHE_BYTES wraps to this, so this is always safe.
-	 */
-	return L1_CACHE_BYTES;
-#endif
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index bea3337a426a..6bb5cc9decf8 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -48,15 +48,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 #define dma_is_consistent(d, h) (0)
 #endif
 
-static inline int dma_get_cache_alignment(void)
-{
-	/*
-	 * Each processor family will define its own L1_CACHE_SHIFT,
-	 * L1_CACHE_BYTES wraps to this, so this is always safe.
-	 */
-	return L1_CACHE_BYTES;
-}
-
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 4b4a0c0b0ccd..74db853ec2cf 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -52,15 +52,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return (dma_addr == DMA_ERROR_CODE);
 }
 
-static inline int dma_get_cache_alignment(void)
-{
-	/*
-	 * no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe
-	 */
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_PCI
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index cf466b39aa13..1326b910fec6 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -90,13 +90,6 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline int
-dma_get_cache_alignment(void)
-{
-	return L2_CACHE_BYTES;
-}
-
 #define dma_is_consistent(d, h) (1)
 
-
 #endif /* _ASM_TILE_DMA_MAPPING_H */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index ac91eed21061..f9c67e83f648 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -87,13 +87,6 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	flush_write_buffers();
 }
 
-static inline int dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all x86, so return the
-	 * maximum possible, to be safe */
-	return boot_cpu_data.x86_clflush_size;
-}
-
 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 						    gfp_t gfp)
 {
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 51882ae3db4d..7104f2f9823e 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -161,12 +161,6 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline int
-dma_get_cache_alignment(void)
-{
-	return L1_CACHE_BYTES;
-}
-
 #define dma_is_consistent(d, h) (1)
 
 static inline void