Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/preempt.h      | 22
-rw-r--r--   include/linux/spinlock_up.h  | 29
2 files changed, 32 insertions, 19 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 5a710b9c578e..87a03c746f17 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -93,14 +93,20 @@ do { \
 
 #else /* !CONFIG_PREEMPT_COUNT */
 
-#define preempt_disable()			do { } while (0)
-#define sched_preempt_enable_no_resched()	do { } while (0)
-#define preempt_enable_no_resched()		do { } while (0)
-#define preempt_enable()			do { } while (0)
-
-#define preempt_disable_notrace()		do { } while (0)
-#define preempt_enable_no_resched_notrace()	do { } while (0)
-#define preempt_enable_notrace()		do { } while (0)
+/*
+ * Even if we don't have any preemption, we need preempt disable/enable
+ * to be barriers, so that we don't have things like get_user/put_user
+ * that can cause faults and scheduling migrate into our preempt-protected
+ * region.
+ */
+#define preempt_disable()			barrier()
+#define sched_preempt_enable_no_resched()	barrier()
+#define preempt_enable_no_resched()		barrier()
+#define preempt_enable()			barrier()
+
+#define preempt_disable_notrace()		barrier()
+#define preempt_enable_no_resched_notrace()	barrier()
+#define preempt_enable_notrace()		barrier()
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
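For readers browsing this hunk out of context, here is a rough user-space sketch of what the change buys (the names below are invented for illustration and this is not kernel code; barrier() here mirrors the kernel's compiler barrier from <linux/compiler.h>). With the old empty stubs the compiler may legally move the protected access across the disable/enable pair; with the barrier() versions it may not, so a faulting access such as get_user()/put_user() cannot drift into or out of the preempt-protected region at compile time.

#define barrier() __asm__ __volatile__("" : : : "memory")

/* Old behaviour on !CONFIG_PREEMPT_COUNT: the stubs expand to nothing. */
#define preempt_disable_old()   do { } while (0)
#define preempt_enable_old()    do { } while (0)

/* New behaviour: the stubs are compiler barriers. */
#define preempt_disable_new()   barrier()
#define preempt_enable_new()    barrier()

static int per_cpu_counter;     /* stands in for preempt-protected per-CPU data */

int touch_counter_old(int delta)
{
        preempt_disable_old();
        per_cpu_counter += delta;   /* may be hoisted/sunk across the empty macros */
        preempt_enable_old();
        return per_cpu_counter;
}

int touch_counter_new(int delta)
{
        preempt_disable_new();
        per_cpu_counter += delta;   /* pinned between the two compiler barriers */
        preempt_enable_new();
        return per_cpu_counter;
}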
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index a26e2fb604e6..e2369c167dbd 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -16,7 +16,10 @@
  * In the debug case, 1 means unlocked, 0 means locked. (the values
  * are inverted, to catch initialization bugs)
  *
- * No atomicity anywhere, we are on UP.
+ * No atomicity anywhere, we are on UP. However, we still need
+ * the compiler barriers, because we do not want the compiler to
+ * move potentially faulting instructions (notably user accesses)
+ * into the locked sequence, resulting in non-atomic execution.
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -25,6 +28,7 @@
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
+	barrier();
 }
 
 static inline void
@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	local_irq_save(flags);
 	lock->slock = 0;
+	barrier();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	char oldval = lock->slock;
 
 	lock->slock = 0;
+	barrier();
 
 	return oldval > 0;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	barrier();
 	lock->slock = 1;
 }
 
 /*
  * Read-write spinlocks. No debug version.
  */
-#define arch_read_lock(lock)		do { (void)(lock); } while (0)
-#define arch_write_lock(lock)		do { (void)(lock); } while (0)
-#define arch_read_trylock(lock)		({ (void)(lock); 1; })
-#define arch_write_trylock(lock)	({ (void)(lock); 1; })
-#define arch_read_unlock(lock)		do { (void)(lock); } while (0)
-#define arch_write_unlock(lock)		do { (void)(lock); } while (0)
+#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock)		({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock)		do { barrier(); (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
 /* for sched.c and kernel_lock.c: */
-# define arch_spin_lock(lock)			do { (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
-# define arch_spin_unlock(lock)			do { (void)(lock); } while (0)
-# define arch_spin_trylock(lock)		({ (void)(lock); 1; })
+# define arch_spin_lock(lock)			do { barrier(); (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
+# define arch_spin_unlock(lock)			do { barrier(); (void)(lock); } while (0)
+# define arch_spin_trylock(lock)		({ barrier(); (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
 #define arch_spin_is_contended(lock)	(((void)(lock), 0))
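As a rough model of the placement above: the barrier sits after the store that takes the lock and before the store that releases it, so the whole critical section is fenced on both sides at the compiler level (on UP there is no other CPU to order against, so a compiler barrier is all that is required). The sketch below is a user-space illustration only; the type and names are stand-ins, not the kernel's definitions.

#define barrier() __asm__ __volatile__("" : : : "memory")

typedef struct { volatile unsigned int slock; } up_spinlock_t;

static int protected_data;      /* stands in for data guarded by the lock */

static inline void up_spin_lock(up_spinlock_t *lock)
{
        lock->slock = 0;        /* debug encoding: 0 means locked */
        barrier();              /* critical section cannot be hoisted above this point */
}

static inline void up_spin_unlock(up_spinlock_t *lock)
{
        barrier();              /* critical section cannot be sunk below this point */
        lock->slock = 1;        /* back to unlocked */
}

int update_protected(up_spinlock_t *lock, int v)
{
        up_spin_lock(lock);
        protected_data = v;     /* fenced on both sides at the compiler level */
        up_spin_unlock(lock);
        return protected_data;
}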