author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2006-11-12 17:27:39 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-12-04 04:39:05 -0500
commit	68a64357d15ae4f596e92715719071952006e83c (patch)
tree	dee519239225e92169ef77e4fad3be25c4dffe9d /include/asm-powerpc/eeh.h
parent	3d1ea8e8cb4d497a2dd73176cc82095b8f193589 (diff)
[POWERPC] Merge 32 and 64 bits asm-powerpc/io.h
The rework done on io.h for the new hookable accessors made this easier,
so I just finished the work and merged the 32-bit and 64-bit io.h for
arch/powerpc. arch/ppc still uses the old version in asm-ppc; there is
just too much gunk in there that I really can't be bothered trying to
clean up.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
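
[Editor's note] The "hookable accessors" mentioned above let a platform interpose on every MMIO access by routing the accessor functions through a table of function pointers (in arch/powerpc that indirection is compiled in only when the relevant config option selects it). A minimal sketch of the pattern in plain C follows; the names (mmio_ops, default_readb, hooked_readb) are illustrative stand-ins, not the kernel's actual API:

#include <stdint.h>

/* Illustrative hookable-accessor table: each MMIO primitive is a
 * function pointer that a platform can replace at boot time. */
struct mmio_ops {
	uint8_t (*readb)(const volatile void *addr);
	void    (*writeb)(uint8_t val, volatile void *addr);
};

/* Default implementations: plain volatile loads and stores. */
static uint8_t default_readb(const volatile void *addr)
{
	return *(const volatile uint8_t *)addr;
}

static void default_writeb(uint8_t val, volatile void *addr)
{
	*(volatile uint8_t *)addr = val;
}

static struct mmio_ops mmio_ops = {
	.readb  = default_readb,
	.writeb = default_writeb,
};

/* Callers always go through the table, so swapping the pointers
 * (e.g. for error-checking variants) hooks every access at once. */
static inline uint8_t hooked_readb(const volatile void *addr)
{
	return mmio_ops.readb(addr);
}

Merging the 32-bit and 64-bit io.h is what lets a single set of such accessor definitions serve both, with the indirection compiled out where it is not needed.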
Diffstat (limited to 'include/asm-powerpc/eeh.h')
-rw-r--r--	include/asm-powerpc/eeh.h	95
1 file changed, 5 insertions(+), 90 deletions(-)
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 66481bbf270a..b886bec67016 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -169,104 +169,19 @@ static inline u64 eeh_readq_be(const volatile void __iomem *addr)
 	return val;
 }
 
-#define EEH_CHECK_ALIGN(v,a) \
-	((((unsigned long)(v)) & ((a) - 1)) == 0)
-
-static inline void eeh_memset_io(volatile void __iomem *addr, int c,
-				 unsigned long n)
-{
-	void *p = (void __force *)addr;
-	u32 lc = c;
-	lc |= lc << 8;
-	lc |= lc << 16;
-
-	__asm__ __volatile__ ("sync" : : : "memory");
-	while(n && !EEH_CHECK_ALIGN(p, 4)) {
-		*((volatile u8 *)p) = c;
-		p++;
-		n--;
-	}
-	while(n >= 4) {
-		*((volatile u32 *)p) = lc;
-		p += 4;
-		n -= 4;
-	}
-	while(n) {
-		*((volatile u8 *)p) = c;
-		p++;
-		n--;
-	}
-	__asm__ __volatile__ ("sync" : : : "memory");
-}
-static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src,
+static inline void eeh_memcpy_fromio(void *dest, const
+				     volatile void __iomem *src,
 				     unsigned long n)
 {
-	void *vsrc = (void __force *) src;
-	void *destsave = dest;
-	unsigned long nsave = n;
-
-	__asm__ __volatile__ ("sync" : : : "memory");
-	while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
-		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
-		vsrc++;
-		dest++;
-		n--;
-	}
-	while(n > 4) {
-		*((u32 *)dest) = *((volatile u32 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
-		vsrc += 4;
-		dest += 4;
-		n -= 4;
-	}
-	while(n) {
-		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
-		vsrc++;
-		dest++;
-		n--;
-	}
-	__asm__ __volatile__ ("sync" : : : "memory");
+	_memcpy_fromio(dest, src, n);
 
 	/* Look for ffff's here at dest[n].  Assume that at least 4 bytes
 	 * were copied. Check all four bytes.
 	 */
-	if ((nsave >= 4) &&
-		(EEH_POSSIBLE_ERROR((*((u32 *) destsave+nsave-4)), u32))) {
-		eeh_check_failure(src, (*((u32 *) destsave+nsave-4)));
-	}
+	if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32))
+		eeh_check_failure(src, *((u32 *)(dest + n - 4)));
 }
 
-static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
-				   unsigned long n)
-{
-	void *vdest = (void __force *) dest;
-
-	__asm__ __volatile__ ("sync" : : : "memory");
-	while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
-		*((volatile u8 *)vdest) = *((u8 *)src);
-		src++;
-		vdest++;
-		n--;
-	}
-	while(n > 4) {
-		*((volatile u32 *)vdest) = *((volatile u32 *)src);
-		src += 4;
-		vdest += 4;
-		n-=4;
-	}
-	while(n) {
-		*((volatile u8 *)vdest) = *((u8 *)src);
-		src++;
-		vdest++;
-		n--;
-	}
-	__asm__ __volatile__ ("sync" : : : "memory");
-}
-
-#undef EEH_CHECK_ALIGN
-
 /* in-string eeh macros */
 static inline void eeh_readsb(const volatile void __iomem *addr, void * buf,
 			      int ns)
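
[Editor's note] The net effect of the hunk above is that eeh_memcpy_fromio() now delegates the copy itself to _memcpy_fromio() and keeps only the EEH failure check. The technique behind that check: a PCI device that EEH has isolated returns all-ones on every read, so finding 0xffffffff in the last word copied marks a *possible* error worth escalating to the expensive firmware check. Note also that the old check's parenthesization, (u32 *)destsave + nsave - 4, scaled the offset by sizeof(u32); the rewritten form, (u32 *)(dest + n - 4), offsets in bytes as intended. A standalone sketch of the pattern, with memcpy_from_mmio() and check_failure() as hypothetical stand-ins for _memcpy_fromio() and eeh_check_failure():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in: would ask the platform/firmware whether
 * the device behind 'src' has actually been isolated. */
static void check_failure(const volatile void *src, uint32_t val)
{
	(void)src;
	(void)val;
}

/* Simple byte copy standing in for _memcpy_fromio(). */
static void memcpy_from_mmio(void *dest, const volatile void *src, size_t n)
{
	uint8_t *d = dest;
	const volatile uint8_t *s = src;

	while (n--)
		*d++ = *s++;
}

static void eeh_style_memcpy_fromio(void *dest, const volatile void *src,
				    size_t n)
{
	memcpy_from_mmio(dest, src, n);

	/* An isolated device reads back as all-ones, so a trailing
	 * 0xffffffff is a possible (not certain) error: escalate to
	 * the real check only in that case. */
	if (n >= 4) {
		uint32_t last;

		memcpy(&last, (uint8_t *)dest + n - 4, sizeof(last));
		if (last == 0xffffffffu)
			check_failure(src, last);
	}
}

The same cheap-filter-then-expensive-check split is what EEH_POSSIBLE_ERROR() expresses in the hunk above.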