author     Hugh Dickins <hugh@veritas.com>        2006-11-10 16:32:40 -0500
committer  Paul Mackerras <paulus@samba.org>      2006-12-04 04:39:20 -0500
commit     ef2b343e99e772e35f0f9d00f7db318b6629c16e
tree       386f9b6b1376ab99a478ab5d394572fca720d0ef
parent     56291e19e37cf3bb8fc701ebf3aa8ffbf59f73ef
[POWERPC] Make soft_enabled irqs preempt safe
Rewrite local_get_flags and local_irq_disable to use r13 explicitly,
to avoid the risk that gcc will split get_paca()->soft_enabled into a
sequence unsafe against preemption. Similar care in local_irq_restore.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
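
For reference, a minimal sketch of the hazard being closed.  The split
sequence and register numbers below are hypothetical, illustrating only
what gcc is permitted to emit for the old C form; the safe accessor shown
is the one the patch adds to irq.c.

  /*
   * Old form: get_paca()->soft_enabled = en;
   * gcc may compile this in two steps, for example:
   *
   *     mr   r9,r13          # copy the paca pointer to a scratch reg
   *     ...                  # preempted here; the task migrates, r13 now
   *                          # points at the new cpu's paca, but r9 still
   *                          # names the old one
   *     stb  r3,off(r9)      # updates the wrong cpu's soft_enabled
   *
   * New form: a single store through r13 itself, so there is no window
   * in which a stale copy of the paca pointer can be used.
   */
  static inline void set_soft_enabled(unsigned long enable)
  {
          __asm__ __volatile__("stb %0,%1(13)"
          : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
  }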
-rw-r--r--   arch/powerpc/kernel/irq.c    | 57
-rw-r--r--   include/asm-powerpc/hw_irq.h | 20
2 files changed, 67 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index e1936952017c..0bd8c7665834 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -97,22 +97,69 @@ EXPORT_SYMBOL(irq_desc);
 
 int distribute_irqs = 1;
 
+static inline unsigned long get_hard_enabled(void)
+{
+	unsigned long enabled;
+
+	__asm__ __volatile__("lbz %0,%1(13)"
+	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
+
+	return enabled;
+}
+
+static inline void set_soft_enabled(unsigned long enable)
+{
+	__asm__ __volatile__("stb %0,%1(13)"
+	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
+}
+
 void local_irq_restore(unsigned long en)
 {
-	get_paca()->soft_enabled = en;
+	/*
+	 * get_paca()->soft_enabled = en;
+	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
+	 * That was allowed before, and in such a case we do need to take care
+	 * that gcc will set soft_enabled directly via r13, not choose to use
+	 * an intermediate register, lest we're preempted to a different cpu.
+	 */
+	set_soft_enabled(en);
 	if (!en)
 		return;
 
 	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		if (get_paca()->lppaca_ptr->int_dword.any_int)
+		/*
+		 * Do we need to disable preemption here? Not really: in the
+		 * unlikely event that we're preempted to a different cpu in
+		 * between getting r13, loading its lppaca_ptr, and loading
+		 * its any_int, we might call iseries_handle_interrupts without
+		 * an interrupt pending on the new cpu, but that's no disaster,
+		 * is it? And the business of preempting us off the old cpu
+		 * would itself involve a local_irq_restore which handles the
+		 * interrupt to that cpu.
+		 *
+		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
+		 * to avoid any preemption checking added into get_paca().
+		 */
+		if (local_paca->lppaca_ptr->int_dword.any_int)
 			iseries_handle_interrupts();
 		return;
 	}
 
-	if (get_paca()->hard_enabled)
+	/*
+	 * if (get_paca()->hard_enabled) return;
+	 * But again we need to take care that gcc gets hard_enabled directly
+	 * via r13, not choose to use an intermediate register, lest we're
+	 * preempted to a different cpu in between the two instructions.
+	 */
+	if (get_hard_enabled())
 		return;
-	/* need to hard-enable interrupts here */
-	get_paca()->hard_enabled = en;
+
+	/*
+	 * Need to hard-enable interrupts here. Since currently disabled,
+	 * no need to take further asm precautions against preemption; but
+	 * use local_paca instead of get_paca() to avoid preemption checking.
+	 */
+	local_paca->hard_enabled = en;
 	if ((int)mfspr(SPRN_DEC) < 0)
 		mtspr(SPRN_DEC, 1);
 	hard_irq_enable();
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
index c4a1ab608f6f..fd3f2a206271 100644
--- a/include/asm-powerpc/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -18,15 +18,25 @@ extern void timer_interrupt(struct pt_regs *);
 
 static inline unsigned long local_get_flags(void)
 {
-	return get_paca()->soft_enabled;
+	unsigned long flags;
+
+	__asm__ __volatile__("lbz %0,%1(13)"
+	: "=r" (flags)
+	: "i" (offsetof(struct paca_struct, soft_enabled)));
+
+	return flags;
 }
 
 static inline unsigned long local_irq_disable(void)
 {
-	unsigned long flag = get_paca()->soft_enabled;
-	get_paca()->soft_enabled = 0;
-	barrier();
-	return flag;
+	unsigned long flags, zero;
+
+	__asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
+	: "=r" (flags), "=&r" (zero)
+	: "i" (offsetof(struct paca_struct, soft_enabled))
+	: "memory");
+
+	return flags;
 }
 
 extern void local_irq_restore(unsigned long);
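
For context, these accessors sit under powerpc's lazy interrupt-disable
scheme: soft_enabled only records that interrupts are logically off, and a
hardware interrupt arriving meanwhile is noted (hard_enabled cleared) and
replayed later, which is why local_irq_restore() above checks the
decrementer and the iSeries any_int flag.  A hedged sketch of the
caller-side idiom follows; the wrapper macros in hw_irq.h are not part of
this diff, so the direct calls here are purely illustrative.

  unsigned long flags;

  flags = local_irq_disable();    /* soft-disable: clears paca soft_enabled */
  /* ... critical section: interrupt handlers are logically masked here ... */
  local_irq_restore(flags);       /* may hard-enable and replay a pending irq */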