author		Jeff Garzik <jeff@garzik.org>	2006-09-19 00:42:13 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-09-19 00:42:13 -0400
commit		4a3381feb823e06c8e2da7e283c17b0b6fdbddcf
tree		e1bef4c3db854bb10fd13dc67415d77b5d999533 /include/asm-powerpc
parent		fea63e38013ec628ab3f7fddc4c2148064b7910a
parent		47a5c6fa0e204a2b63309c648bb2fde36836c826
Merge branch 'master' into upstream
Diffstat (limited to 'include/asm-powerpc')

 include/asm-powerpc/eeh.h      |  3 +
 include/asm-powerpc/futex.h    | 28 +-
 include/asm-powerpc/io.h       | 43 +-
 include/asm-powerpc/kdump.h    |  2 +-
 include/asm-powerpc/paca.h     |  1 +
 include/asm-powerpc/spinlock.h | 17 +

 6 files changed, 77 insertions(+), 17 deletions(-)
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 4df3e80118f4..6a784396660b 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -205,6 +205,7 @@ static inline void eeh_memset_io(volatile void __iomem *addr, int c,
 	lc |= lc << 8;
 	lc |= lc << 16;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && !EEH_CHECK_ALIGN(p, 4)) {
 		*((volatile u8 *)p) = c;
 		p++;
@@ -229,6 +230,7 @@ static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src,
 	void *destsave = dest;
 	unsigned long nsave = n;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
 		__asm__ __volatile__ ("eieio" : : : "memory");
@@ -266,6 +268,7 @@ static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
 {
 	void *vdest = (void __force *) dest;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
 		*((volatile u8 *)vdest) = *((u8 *)src);
 		src++;
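Each of the three EEH copy/fill helpers now begins with a full "sync" instead of relying on a trailing one, matching the in_/out_ accessor change further down: stores to cacheable memory issued before the helper must be performed before its first uncached access. A minimal sketch of the kind of caller this protects (the struct, field names, and issue_cmd() are hypothetical, not part of this patch):

	/* Sketch: a driver builds a command in normal (cacheable) memory,
	 * then copies it into device space.  The leading "sync" added to
	 * eeh_memcpy_toio() keeps the cacheable stores from being
	 * reordered past the MMIO stores that the device will act on. */
	struct cmd {			/* hypothetical command block */
		u32 opcode;
		u32 len;
	};

	static void issue_cmd(void __iomem *dev_window, struct cmd *c)
	{
		c->opcode = 0x1;	/* plain stores to cacheable memory */
		c->len = sizeof(*c);
		eeh_memcpy_toio(dev_window, c, sizeof(*c));	/* starts with sync */
	}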
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index f1b3c00bc1ce..936422e54891 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -84,7 +84,33 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 static inline int
 futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
-	return -ENOSYS;
+	int prev;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	__asm__ __volatile__ (
+        LWSYNC_ON_SMP
+"1:     lwarx   %0,0,%2         # futex_atomic_cmpxchg_inatomic\n\
+        cmpw    0,%0,%3\n\
+        bne-    3f\n"
+        PPC405_ERR77(0,%2)
+"2:     stwcx.  %4,0,%2\n\
+        bne-    1b\n"
+        ISYNC_ON_SMP
+"3:	.section .fixup,\"ax\"\n\
+4:	li	%0,%5\n\
+	b	3b\n\
+	.previous\n\
+	.section __ex_table,\"a\"\n\
+	.align 3\n\
+	" PPC_LONG "1b,4b,2b,4b\n\
+	.previous" \
+	: "=&r" (prev), "+m" (*uaddr)
+	: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
+	: "cc", "memory");
+
+	return prev;
 }
 
 #endif /* __KERNEL__ */
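Read as C, the new body is a compare-and-swap on a user-space word that returns whatever value it found there; the generic futex code compares that return value against oldval to decide whether the swap happened. A C-level sketch of the semantics (load_reserve() and store_conditional() are illustrative stand-ins for lwarx/stwcx., not real kernel helpers):

	/* Semantics sketch, not the real implementation: */
	int cmpxchg_user_word(int *uaddr, int oldval, int newval)
	{
		int prev;

		do {
			prev = load_reserve(uaddr);		/* 1: lwarx           */
			if (prev != oldval)
				break;				/* cmpw; bne- 3f      */
		} while (!store_conditional(uaddr, newval));	/* 2: stwcx.; bne- 1b */

		return prev;	/* a fault at 1: or 2: is fixed up to -EFAULT */
	}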
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 36c4c34bf565..212428db0d8b 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -19,6 +19,7 @@ extern int check_legacy_ioport(unsigned long base_port);
 #include <linux/compiler.h>
 #include <asm/page.h>
 #include <asm/byteorder.h>
+#include <asm/paca.h>
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iseries/iseries_io.h>
 #endif
@@ -162,7 +163,11 @@ extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, int ns);
 extern void _insl_ns(volatile u32 __iomem *port, void *buf, int nl);
 extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl);
 
-#define mmiowb()
+static inline void mmiowb(void)
+{
+	__asm__ __volatile__ ("sync" : : : "memory");
+	get_paca()->io_sync = 0;
+}
 
 /*
  * output pause versions need a delay at least for the
@@ -278,22 +283,23 @@ static inline int in_8(const volatile unsigned char __iomem *addr)
 {
 	int ret;
 
-	__asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
 
 static inline void out_8(volatile unsigned char __iomem *addr, int val)
 {
-	__asm__ __volatile__("stb%U0%X0 %1,%0; sync"
+	__asm__ __volatile__("sync; stb%U0%X0 %1,%0"
 			     : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 static inline int in_le16(const volatile unsigned short __iomem *addr)
 {
 	int ret;
 
-	__asm__ __volatile__("lhbrx %0,0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lhbrx %0,0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "r" (addr), "m" (*addr));
 	return ret;
 }
@@ -302,28 +308,30 @@ static inline int in_be16(const volatile unsigned short __iomem *addr)
 {
 	int ret;
 
-	__asm__ __volatile__("lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
 
 static inline void out_le16(volatile unsigned short __iomem *addr, int val)
 {
-	__asm__ __volatile__("sthbrx %1,0,%2; sync"
+	__asm__ __volatile__("sync; sthbrx %1,0,%2"
 			     : "=m" (*addr) : "r" (val), "r" (addr));
+	get_paca()->io_sync = 1;
 }
 
 static inline void out_be16(volatile unsigned short __iomem *addr, int val)
 {
-	__asm__ __volatile__("sth%U0%X0 %1,%0; sync"
+	__asm__ __volatile__("sync; sth%U0%X0 %1,%0"
 			     : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 static inline unsigned in_le32(const volatile unsigned __iomem *addr)
 {
 	unsigned ret;
 
-	__asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lwbrx %0,0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "r" (addr), "m" (*addr));
 	return ret;
 }
@@ -332,21 +340,23 @@ static inline unsigned in_be32(const volatile unsigned __iomem *addr)
 {
 	unsigned ret;
 
-	__asm__ __volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
 
 static inline void out_le32(volatile unsigned __iomem *addr, int val)
 {
-	__asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr)
+	__asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr)
 			     : "r" (val), "r" (addr));
+	get_paca()->io_sync = 1;
 }
 
 static inline void out_be32(volatile unsigned __iomem *addr, int val)
 {
-	__asm__ __volatile__("stw%U0%X0 %1,%0; sync"
+	__asm__ __volatile__("sync; stw%U0%X0 %1,%0"
 			     : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 static inline unsigned long in_le64(const volatile unsigned long __iomem *addr)
@@ -354,6 +364,7 @@ static inline unsigned long in_le64(const volatile unsigned long __iomem *addr)
 	unsigned long tmp, ret;
 
 	__asm__ __volatile__(
+			     "sync\n"
 			     "ld %1,0(%2)\n"
 			     "twi 0,%1,0\n"
 			     "isync\n"
@@ -372,7 +383,7 @@ static inline unsigned long in_be64(const volatile unsigned long __iomem *addr)
 {
 	unsigned long ret;
 
-	__asm__ __volatile__("ld%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; ld%U1%X1 %0,%1; twi 0,%0,0; isync"
 			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
@@ -389,14 +400,16 @@ static inline void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
 			     "rldicl %1,%1,32,0\n"
 			     "rlwimi %0,%1,8,8,31\n"
 			     "rlwimi %0,%1,24,16,23\n"
-			     "std %0,0(%3)\n"
-			     "sync"
+			     "sync\n"
+			     "std %0,0(%3)"
 			     : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr));
+	get_paca()->io_sync = 1;
 }
 
 static inline void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
 {
-	__asm__ __volatile__("std%U0%X0 %1,%0; sync" : "=m" (*addr) : "r" (val));
+	__asm__ __volatile__("sync; std%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 #ifndef CONFIG_PPC_ISERIES
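The net effect: every accessor orders itself against preceding cacheable stores with a leading "sync", stores no longer pay for a trailing "sync" on every call, and each store records in the PACA that an MMIO write may still be in flight. The case being fixed is the common driver idiom below (a sketch; dev, regs, CTRL, and START are hypothetical names):

	/* Sketch of the idiom this series makes both safe and cheaper: */
	spin_lock(&dev->lock);
	writel(START, dev->regs + CTRL);	/* "sync; stw ..." and io_sync = 1 */
	spin_unlock(&dev->lock);		/* SYNC_IO sees io_sync, issues mb(),
						 * so the write reaches the device
						 * before another CPU can take the
						 * lock and touch the same registers */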
diff --git a/include/asm-powerpc/kdump.h b/include/asm-powerpc/kdump.h
index dc1574c945f8..10e8eb1e6f4f 100644
--- a/include/asm-powerpc/kdump.h
+++ b/include/asm-powerpc/kdump.h
@@ -7,7 +7,7 @@
 /* How many bytes to reserve at zero for kdump. The reserve limit should
  * be greater or equal to the trampoline's end address.
  * Reserve to the end of the FWNMI area, see head_64.S */
-#define KDUMP_RESERVE_LIMIT	0x8000
+#define KDUMP_RESERVE_LIMIT	0x10000 /* 64K */
 
 #ifdef CONFIG_CRASH_DUMP
 
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 2d4585f06209..3d5d590bc4b0 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -93,6 +93,7 @@ struct paca_struct {
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u8 proc_enabled;		/* irq soft-enable flag */
+	u8 io_sync;			/* writel() needs spin_unlock sync */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h
index 895cb6d3a42a..c31e4382a775 100644
--- a/include/asm-powerpc/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -36,6 +36,19 @@
 #define LOCK_TOKEN	1
 #endif
 
+#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
+#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
+#define SYNC_IO		do {						\
+				if (unlikely(get_paca()->io_sync)) {	\
+					mb();				\
+					get_paca()->io_sync = 0;	\
+				}					\
+			} while (0)
+#else
+#define CLEAR_IO_SYNC
+#define SYNC_IO
+#endif
+
 /*
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
@@ -61,6 +74,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
 
 static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 {
+	CLEAR_IO_SYNC;
 	return __spin_trylock(lock) == 0;
 }
 
@@ -91,6 +105,7 @@ extern void __rw_yield(raw_rwlock_t *lock);
 
 static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
 {
+	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__spin_trylock(lock) == 0))
 			break;
@@ -107,6 +122,7 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long flags_dis;
 
+	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__spin_trylock(lock) == 0))
 			break;
@@ -124,6 +140,7 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 
 static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
 {
+	SYNC_IO;
 	__asm__ __volatile__("# __raw_spin_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
 	lock->slock = 0;
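Lock acquisition clears the per-CPU flag, any MMIO store sets it, and unlock upgrades its barrier to a full "sync" only when the flag is set, so lock sections that never touch I/O pay nothing extra. The effective unlock path on 64-bit SMP looks roughly like this (a sketch assuming LWSYNC_ON_SMP expands to lwsync, as in asm/synch.h):

	/* Sketch of __raw_spin_unlock after macro expansion (PPC64 SMP): */
	static inline void raw_spin_unlock_expanded(raw_spinlock_t *lock)
	{
		if (unlikely(get_paca()->io_sync)) {	/* SYNC_IO */
			mb();		/* full "sync": the pending MMIO store
					 * is performed before the release   */
			get_paca()->io_sync = 0;
		}
		__asm__ __volatile__("lwsync" : : : "memory");	/* LWSYNC_ON_SMP */
		lock->slock = 0;	/* release the lock */
	}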