author    Chris Metcalf <cmetcalf@tilera.com>  2012-03-27 15:40:20 -0400
committer Chris Metcalf <cmetcalf@tilera.com>  2012-05-25 12:48:20 -0400
commit    51007004f44c9588d70ffb77e1f52479bd5b0e37
tree      ddf8dd2f83554ecbe9de0c690cfab3889308397b
parent    76e10d158efb6d4516018846f60c2ab5501900bc
arch/tile: use interrupt critical sections less
In general we want to avoid ever touching memory while within an
interrupt critical section, since page faults are delivered on a
different path from the hypervisor when an interrupt critical section
is active, and we carefully decided with tilegx that we didn't need to
support this path in the kernel.  (On tilepro we did implement that
path as part of supporting atomic instructions in software.)

In practice we always need to touch the kernel stack, since that's
where we store the interrupt state before releasing the critical
section, but this change cleans up a few things:

- The IRQ_ENABLE macro is split up so that when we want to enable
  interrupts in a deferred way (e.g. for cpu_idle or for interrupt
  return), we can read the per-cpu enable mask before entering the
  critical section.

- The cache-migration code is changed to use interrupt masking instead
  of interrupt critical sections.

- The interrupt-entry code is changed so that we defer loading "tp"
  from per-cpu data until after we have released the interrupt
  critical section.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
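The ordering change behind the IRQ_ENABLE split can be made concrete
with a short C sketch. Everything below is illustrative, not the
kernel's actual code (which is tile assembly): enter_critical() and
exit_critical() are hypothetical stand-ins for setting and clearing
the interrupt critical section, apply_mask() stands in for the mtspr
that resets the interrupt mask, and a plain variable models the real
per-cpu interrupts_enabled_mask.

/* Sketch only: every name below is a hypothetical stand-in. */
static unsigned long long interrupts_enabled_mask; /* models the per-cpu variable */

static void enter_critical(void) { /* models setting INTERRUPT_CRITICAL_SECTION */ }
static void exit_critical(void)  { /* models clearing it */ }
static void apply_mask(unsigned long long m) { (void)m; /* models the mtspr */ }

/* Old shape: the load of the enable mask is a memory access made while
 * the critical section is held, the very thing we want to avoid. */
static void irq_enable_old(void)
{
	enter_critical();
	unsigned long long mask = interrupts_enabled_mask; /* load inside */
	apply_mask(mask);
	exit_critical();
}

/* New shape: IRQ_ENABLE_LOAD runs before the critical section and
 * IRQ_ENABLE_APPLY inside it, so only a register write remains inside. */
static void irq_enable_new(void)
{
	unsigned long long mask = interrupts_enabled_mask; /* IRQ_ENABLE_LOAD */
	enter_critical();
	apply_mask(mask);                                  /* IRQ_ENABLE_APPLY */
	exit_critical();
}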
Diffstat (limited to 'arch/tile/include/asm/irqflags.h')
 arch/tile/include/asm/irqflags.h | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 5db0ce54284d..b4e96fef2cf8 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -28,10 +28,10 @@
  */
 #if CHIP_HAS_AUX_PERF_COUNTERS()
 #define LINUX_MASKABLE_INTERRUPTS_HI \
-	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
+	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
 #else
 #define LINUX_MASKABLE_INTERRUPTS_HI \
-	(~(INT_MASK_HI(INT_PERF_COUNT)))
+	(~(INT_MASK_HI(INT_PERF_COUNT)))
 #endif
 
 #else
@@ -90,6 +90,14 @@
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
+#define interrupt_mask_save_mask() \
+	(__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
+	(((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
+#define interrupt_mask_restore_mask(mask) do { \
+	unsigned long long __m = (mask); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
+} while (0)
 #else
 #define interrupt_mask_set(n) \
 	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
@@ -101,6 +109,10 @@
 	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
+#define interrupt_mask_save_mask() \
+	__insn_mfspr(SPR_INTERRUPT_MASK_K)
+#define interrupt_mask_restore_mask(mask) \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
 #endif
 
 /*
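The two hunks above add a save/restore pair for each chip flavor,
enabling the mask-and-restore idiom that the cache-migration code now
uses in place of an interrupt critical section. A hedged sketch of the
pattern: the enclosing function is hypothetical (the real migration
code is tile assembly), but the three macros are the ones defined
above.

/* Illustrative wrapper: mask all interrupts around a sensitive region,
 * then put the mask back exactly as we found it. */
static void run_with_interrupts_masked(void (*fn)(void))
{
	unsigned long long saved = interrupt_mask_save_mask();

	interrupt_mask_set_mask(-1ULL);     /* block all maskable interrupts */
	fn();                               /* work that must not be interrupted */
	interrupt_mask_restore_mask(saved); /* restore the previous mask state */
}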
@@ -122,7 +134,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Disable all interrupts, including NMIs. */
 #define arch_local_irq_disable_all() \
-	interrupt_mask_set_mask(-1UL)
+	interrupt_mask_set_mask(-1ULL)
 
 /* Re-enable all maskable interrupts. */
 #define arch_local_irq_enable() \
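The -1UL to -1ULL change above matters on tilepro, where unsigned long
is 32 bits: converting -1UL to the 64-bit mask type zero-extends it,
so only the low 32 interrupt bits get set and the high half of the
mask is left untouched. A small self-contained demonstration of the
conversion rule, modeling a 32-bit unsigned long with uint32_t:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t all_ones_32 = -1;            /* models -1UL on tilepro: 0xffffffff */
	unsigned long long bad = all_ones_32; /* zero-extends: high 32 bits are 0 */
	unsigned long long good = -1ULL;      /* all 64 bits set */

	printf("-1UL (32-bit long): %016llx\n", bad);  /* 00000000ffffffff */
	printf("-1ULL             : %016llx\n", good); /* ffffffffffffffff */
	return 0;
}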
@@ -179,7 +191,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #ifdef __tilegx__
 
 #if INT_MEM_ERROR != 0
-# error Fix IRQ_DISABLED() macro
+# error Fix IRQS_DISABLED() macro
 #endif
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
@@ -207,9 +219,10 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
-	ld      tmp0, tmp0; \
+	ld      tmp0, tmp0
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
 	mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
@@ -253,17 +266,22 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
 	{ \
	 lw     tmp0, tmp0; \
	 addi   tmp1, tmp0, 4 \
 	}; \
-	lw      tmp1, tmp1; \
+	lw      tmp1, tmp1
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
 	mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
 	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 #endif
 
+#define IRQ_ENABLE(tmp0, tmp1) \
+	IRQ_ENABLE_LOAD(tmp0, tmp1); \
+	IRQ_ENABLE_APPLY(tmp0, tmp1)
+
 /*
  * Do the CPU's IRQ-state tracing from assembly code.  We call a
  * C function, but almost everywhere we do, we don't mind clobbering