Diffstat (limited to 'include/linux/spinlock_up.h')
-rw-r--r--	include/linux/spinlock_up.h	29
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index a26e2fb604e6..e2369c167dbd 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -16,7 +16,10 @@
  * In the debug case, 1 means unlocked, 0 means locked. (the values
  * are inverted, to catch initialization bugs)
  *
- * No atomicity anywhere, we are on UP.
+ * No atomicity anywhere, we are on UP. However, we still need
+ * the compiler barriers, because we do not want the compiler to
+ * move potentially faulting instructions (notably user accesses)
+ * into the locked sequence, resulting in non-atomic execution.
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -25,6 +28,7 @@
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
+	barrier();
 }
 
 static inline void
@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	local_irq_save(flags);
 	lock->slock = 0;
+	barrier();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	char oldval = lock->slock;
 
 	lock->slock = 0;
+	barrier();
 
 	return oldval > 0;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	barrier();
 	lock->slock = 1;
 }
 
 /*
  * Read-write spinlocks. No debug version.
  */
-#define arch_read_lock(lock)		do { (void)(lock); } while (0)
-#define arch_write_lock(lock)		do { (void)(lock); } while (0)
-#define arch_read_trylock(lock)	({ (void)(lock); 1; })
-#define arch_write_trylock(lock)	({ (void)(lock); 1; })
-#define arch_read_unlock(lock)		do { (void)(lock); } while (0)
-#define arch_write_unlock(lock)	do { (void)(lock); } while (0)
+#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
 /* for sched.c and kernel_lock.c: */
-# define arch_spin_lock(lock)		do { (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
-# define arch_spin_unlock(lock)	do { (void)(lock); } while (0)
-# define arch_spin_trylock(lock)	({ (void)(lock); 1; })
+# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
+# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
+# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
 
 #define arch_spin_is_contended(lock)	(((void)(lock), 0))
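The comment added at the top of the file is the whole rationale for the patch: on UP there is no other CPU to race with, but the compiler may still reorder memory accesses around the plain stores to the lock word, which would let a potentially faulting access (such as a user access) drift outside the "locked" sequence. The following stand-alone C sketch illustrates that hazard; it is not kernel code, barrier() is redefined locally with the same compiler-only semantics as the kernel macro, and up_spin_lock(), up_spin_unlock() and read_shared() are hypothetical names used purely for illustration.

/* Stand-alone sketch, not kernel code; names are illustrative only. */
#include <stdio.h>

/* Compiler-only barrier, same idea as the kernel's barrier() macro. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static char slock = 1;	/* 1 = unlocked, 0 = locked, as in the debug case */

static void up_spin_lock(void)
{
	slock = 0;
	barrier();	/* keep later accesses from being hoisted above the store */
}

static void up_spin_unlock(void)
{
	barrier();	/* keep earlier accesses from sinking below the store */
	slock = 1;
}

static int read_shared(int *p)
{
	/*
	 * Stands in for a potentially faulting access. Without the two
	 * barrier() calls above, nothing would stop the compiler (after
	 * inlining) from moving this load before the slock = 0 store or
	 * after the slock = 1 store, i.e. outside the "locked" sequence.
	 */
	return *p;
}

int main(void)
{
	int shared = 42;
	int v;

	up_spin_lock();
	v = read_shared(&shared);
	up_spin_unlock();

	printf("%d\n", v);
	return 0;
}

Because barrier() expands to an empty asm statement with a "memory" clobber, it costs nothing at run time; it only forbids the compiler from caching or reordering memory accesses across it, which is exactly the property the locked sequence needs on UP.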