-rw-r--r--  arch/alpha/include/asm/irqflags.h | 67
-rw-r--r--  arch/alpha/include/asm/system.h | 28
-rw-r--r--  arch/arm/include/asm/irqflags.h | 145
-rw-r--r--  arch/avr32/include/asm/irqflags.h | 29
-rw-r--r--  arch/blackfin/include/asm/irqflags.h | 12
-rw-r--r--  arch/blackfin/kernel/trace.c | 1
-rw-r--r--  arch/cris/include/arch-v10/arch/irqflags.h | 45
-rw-r--r--  arch/cris/include/arch-v10/arch/system.h | 16
-rw-r--r--  arch/cris/include/arch-v32/arch/irqflags.h | 46
-rw-r--r--  arch/cris/include/arch-v32/arch/system.h | 22
-rw-r--r--  arch/cris/include/asm/irqflags.h | 1
-rw-r--r--  arch/cris/include/asm/system.h | 1
-rw-r--r--  arch/frv/include/asm/irqflags.h | 158
-rw-r--r--  arch/frv/include/asm/system.h | 136
-rw-r--r--  arch/h8300/include/asm/irqflags.h | 43
-rw-r--r--  arch/h8300/include/asm/system.h | 24
-rw-r--r--  arch/ia64/include/asm/irqflags.h | 94
-rw-r--r--  arch/ia64/include/asm/system.h | 76
-rw-r--r--  arch/m32r/include/asm/irqflags.h | 104
-rw-r--r--  arch/m32r/include/asm/system.h | 66
-rw-r--r--  arch/m68k/include/asm/entry_no.h | 2
-rw-r--r--  arch/m68k/include/asm/irqflags.h | 76
-rw-r--r--  arch/m68k/include/asm/system_mm.h | 25
-rw-r--r--  arch/m68k/include/asm/system_no.h | 57
-rw-r--r--  arch/m68knommu/kernel/asm-offsets.c | 2
-rw-r--r--  arch/m68knommu/platform/coldfire/head.S | 1
-rw-r--r--  arch/microblaze/include/asm/irqflags.h | 193
-rw-r--r--  arch/mips/include/asm/irqflags.h | 53
-rw-r--r--  arch/mips/kernel/smtc.c | 4
-rw-r--r--  arch/mn10300/include/asm/irqflags.h | 123
-rw-r--r--  arch/mn10300/include/asm/system.h | 109
-rw-r--r--  arch/mn10300/kernel/entry.S | 1
-rw-r--r--  arch/parisc/include/asm/irqflags.h | 46
-rw-r--r--  arch/parisc/include/asm/system.h | 19
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 113
-rw-r--r--  arch/powerpc/include/asm/irqflags.h | 2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 4
-rw-r--r--  arch/powerpc/kernel/irq.c | 4
-rw-r--r--  arch/s390/include/asm/irqflags.h | 51
-rw-r--r--  arch/s390/include/asm/system.h | 2
-rw-r--r--  arch/s390/kernel/mem_detect.c | 4
-rw-r--r--  arch/s390/mm/init.c | 3
-rw-r--r--  arch/s390/mm/maccess.c | 4
-rw-r--r--  arch/score/include/asm/irqflags.h | 187
-rw-r--r--  arch/sh/include/asm/irqflags.h | 4
-rw-r--r--  arch/sh/kernel/irq_32.c | 12
-rw-r--r--  arch/sparc/include/asm/irqflags_32.h | 35
-rw-r--r--  arch/sparc/include/asm/irqflags_64.h | 29
-rw-r--r--  arch/sparc/kernel/irq_32.c | 13
-rw-r--r--  arch/sparc/prom/p1275.c | 2
-rw-r--r--  arch/tile/include/asm/irqflags.h | 36
-rw-r--r--  arch/x86/include/asm/irqflags.h | 32
-rw-r--r--  arch/x86/include/asm/paravirt.h | 16
-rw-r--r--  arch/x86/xen/spinlock.c | 2
-rw-r--r--  arch/xtensa/include/asm/irqflags.h | 58
-rw-r--r--  arch/xtensa/include/asm/system.h | 33
-rw-r--r--  drivers/s390/char/sclp.c | 2
-rw-r--r--  include/asm-generic/atomic.h | 5
-rw-r--r--  include/asm-generic/cmpxchg-local.h | 1
-rw-r--r--  include/asm-generic/hardirq.h | 1
-rw-r--r--  include/asm-generic/irqflags.h | 52
-rw-r--r--  include/linux/irqflags.h | 107
-rw-r--r--  include/linux/spinlock.h | 1
63 files changed, 1482 insertions, 1158 deletions
diff --git a/arch/alpha/include/asm/irqflags.h b/arch/alpha/include/asm/irqflags.h
new file mode 100644
index 000000000000..299bbc7e9d71
--- /dev/null
+++ b/arch/alpha/include/asm/irqflags.h
@@ -0,0 +1,67 @@
1#ifndef __ALPHA_IRQFLAGS_H
2#define __ALPHA_IRQFLAGS_H
3
4#include <asm/system.h>
5
6#define IPL_MIN 0
7#define IPL_SW0 1
8#define IPL_SW1 2
9#define IPL_DEV0 3
10#define IPL_DEV1 4
11#define IPL_TIMER 5
12#define IPL_PERF 6
13#define IPL_POWERFAIL 6
14#define IPL_MCHECK 7
15#define IPL_MAX 7
16
17#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
18#undef IPL_MIN
19#define IPL_MIN __min_ipl
20extern int __min_ipl;
21#endif
22
23#define getipl() (rdps() & 7)
24#define setipl(ipl) ((void) swpipl(ipl))
25
26static inline unsigned long arch_local_save_flags(void)
27{
28 return rdps();
29}
30
31static inline void arch_local_irq_disable(void)
32{
33 setipl(IPL_MAX);
34 barrier();
35}
36
37static inline unsigned long arch_local_irq_save(void)
38{
39 unsigned long flags = swpipl(IPL_MAX);
40 barrier();
41 return flags;
42}
43
44static inline void arch_local_irq_enable(void)
45{
46 barrier();
47 setipl(IPL_MIN);
48}
49
50static inline void arch_local_irq_restore(unsigned long flags)
51{
52 barrier();
53 setipl(flags);
54 barrier();
55}
56
57static inline bool arch_irqs_disabled_flags(unsigned long flags)
58{
59 return flags == IPL_MAX;
60}
61
62static inline bool arch_irqs_disabled(void)
63{
64 return arch_irqs_disabled_flags(getipl());
65}
66
67#endif /* __ALPHA_IRQFLAGS_H */
diff --git a/arch/alpha/include/asm/system.h b/arch/alpha/include/asm/system.h
index 5aa40cca4f23..9f78e6934637 100644
--- a/arch/alpha/include/asm/system.h
+++ b/arch/alpha/include/asm/system.h
@@ -259,34 +259,6 @@ __CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
 __CALL_PAL_W1(wrusp, unsigned long);
 __CALL_PAL_W1(wrvptptr, unsigned long);
 
-#define IPL_MIN		0
-#define IPL_SW0		1
-#define IPL_SW1		2
-#define IPL_DEV0	3
-#define IPL_DEV1	4
-#define IPL_TIMER	5
-#define IPL_PERF	6
-#define IPL_POWERFAIL	6
-#define IPL_MCHECK	7
-#define IPL_MAX		7
-
-#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
-#undef IPL_MIN
-#define IPL_MIN		__min_ipl
-extern int __min_ipl;
-#endif
-
-#define getipl()		(rdps() & 7)
-#define setipl(ipl)		((void) swpipl(ipl))
-
-#define local_irq_disable()		do { setipl(IPL_MAX); barrier(); } while(0)
-#define local_irq_enable()		do { barrier(); setipl(IPL_MIN); } while(0)
-#define local_save_flags(flags)		((flags) = rdps())
-#define local_irq_save(flags)		do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
-#define local_irq_restore(flags)	do { barrier(); setipl(flags); barrier(); } while(0)
-
-#define irqs_disabled()	(getipl() == IPL_MAX)
-
 /*
  * TB routines..
  */
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 6d09974e6646..1e6cca55c750 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -10,66 +10,85 @@
10 */ 10 */
11#if __LINUX_ARM_ARCH__ >= 6 11#if __LINUX_ARM_ARCH__ >= 6
12 12
13#define raw_local_irq_save(x) \ 13static inline unsigned long arch_local_irq_save(void)
14 ({ \ 14{
15 __asm__ __volatile__( \ 15 unsigned long flags;
16 "mrs %0, cpsr @ local_irq_save\n" \ 16
17 "cpsid i" \ 17 asm volatile(
18 : "=r" (x) : : "memory", "cc"); \ 18 " mrs %0, cpsr @ arch_local_irq_save\n"
19 }) 19 " cpsid i"
20 : "=r" (flags) : : "memory", "cc");
21 return flags;
22}
23
24static inline void arch_local_irq_enable(void)
25{
26 asm volatile(
27 " cpsie i @ arch_local_irq_enable"
28 :
29 :
30 : "memory", "cc");
31}
32
33static inline void arch_local_irq_disable(void)
34{
35 asm volatile(
36 " cpsid i @ arch_local_irq_disable"
37 :
38 :
39 : "memory", "cc");
40}
20 41
21#define raw_local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc")
22#define raw_local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
23#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") 42#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
24#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") 43#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
25
26#else 44#else
27 45
28/* 46/*
29 * Save the current interrupt enable state & disable IRQs 47 * Save the current interrupt enable state & disable IRQs
30 */ 48 */
31#define raw_local_irq_save(x) \ 49static inline unsigned long arch_local_irq_save(void)
32 ({ \ 50{
33 unsigned long temp; \ 51 unsigned long flags, temp;
34 (void) (&temp == &x); \ 52
35 __asm__ __volatile__( \ 53 asm volatile(
36 "mrs %0, cpsr @ local_irq_save\n" \ 54 " mrs %0, cpsr @ arch_local_irq_save\n"
37" orr %1, %0, #128\n" \ 55 " orr %1, %0, #128\n"
38" msr cpsr_c, %1" \ 56 " msr cpsr_c, %1"
39 : "=r" (x), "=r" (temp) \ 57 : "=r" (flags), "=r" (temp)
40 : \ 58 :
41 : "memory", "cc"); \ 59 : "memory", "cc");
42 }) 60 return flags;
43 61}
62
44/* 63/*
45 * Enable IRQs 64 * Enable IRQs
46 */ 65 */
47#define raw_local_irq_enable() \ 66static inline void arch_local_irq_enable(void)
48 ({ \ 67{
49 unsigned long temp; \ 68 unsigned long temp;
50 __asm__ __volatile__( \ 69 asm volatile(
51 "mrs %0, cpsr @ local_irq_enable\n" \ 70 " mrs %0, cpsr @ arch_local_irq_enable\n"
52" bic %0, %0, #128\n" \ 71 " bic %0, %0, #128\n"
53" msr cpsr_c, %0" \ 72 " msr cpsr_c, %0"
54 : "=r" (temp) \ 73 : "=r" (temp)
55 : \ 74 :
56 : "memory", "cc"); \ 75 : "memory", "cc");
57 }) 76}
58 77
59/* 78/*
60 * Disable IRQs 79 * Disable IRQs
61 */ 80 */
62#define raw_local_irq_disable() \ 81static inline void arch_local_irq_disable(void)
63 ({ \ 82{
64 unsigned long temp; \ 83 unsigned long temp;
65 __asm__ __volatile__( \ 84 asm volatile(
66 "mrs %0, cpsr @ local_irq_disable\n" \ 85 " mrs %0, cpsr @ arch_local_irq_disable\n"
67" orr %0, %0, #128\n" \ 86 " orr %0, %0, #128\n"
68" msr cpsr_c, %0" \ 87 " msr cpsr_c, %0"
69 : "=r" (temp) \ 88 : "=r" (temp)
70 : \ 89 :
71 : "memory", "cc"); \ 90 : "memory", "cc");
72 }) 91}
73 92
74/* 93/*
75 * Enable FIQs 94 * Enable FIQs
@@ -106,27 +125,31 @@
106/* 125/*
107 * Save the current interrupt enable state. 126 * Save the current interrupt enable state.
108 */ 127 */
109#define raw_local_save_flags(x) \ 128static inline unsigned long arch_local_save_flags(void)
110 ({ \ 129{
111 __asm__ __volatile__( \ 130 unsigned long flags;
112 "mrs %0, cpsr @ local_save_flags" \ 131 asm volatile(
113 : "=r" (x) : : "memory", "cc"); \ 132 " mrs %0, cpsr @ local_save_flags"
114 }) 133 : "=r" (flags) : : "memory", "cc");
134 return flags;
135}
115 136
116/* 137/*
117 * restore saved IRQ & FIQ state 138 * restore saved IRQ & FIQ state
118 */ 139 */
119#define raw_local_irq_restore(x) \ 140static inline void arch_local_irq_restore(unsigned long flags)
120 __asm__ __volatile__( \ 141{
121 "msr cpsr_c, %0 @ local_irq_restore\n" \ 142 asm volatile(
122 : \ 143 " msr cpsr_c, %0 @ local_irq_restore"
123 : "r" (x) \ 144 :
124 : "memory", "cc") 145 : "r" (flags)
146 : "memory", "cc");
147}
125 148
126#define raw_irqs_disabled_flags(flags) \ 149static inline int arch_irqs_disabled_flags(unsigned long flags)
127({ \ 150{
128 (int)((flags) & PSR_I_BIT); \ 151 return flags & PSR_I_BIT;
129}) 152}
130 153
131#endif 154#endif
132#endif 155#endif
diff --git a/arch/avr32/include/asm/irqflags.h b/arch/avr32/include/asm/irqflags.h
index 93570daac38a..006e9487372d 100644
--- a/arch/avr32/include/asm/irqflags.h
+++ b/arch/avr32/include/asm/irqflags.h
@@ -8,16 +8,14 @@
8#ifndef __ASM_AVR32_IRQFLAGS_H 8#ifndef __ASM_AVR32_IRQFLAGS_H
9#define __ASM_AVR32_IRQFLAGS_H 9#define __ASM_AVR32_IRQFLAGS_H
10 10
11#include <linux/types.h>
11#include <asm/sysreg.h> 12#include <asm/sysreg.h>
12 13
13static inline unsigned long __raw_local_save_flags(void) 14static inline unsigned long arch_local_save_flags(void)
14{ 15{
15 return sysreg_read(SR); 16 return sysreg_read(SR);
16} 17}
17 18
18#define raw_local_save_flags(x) \
19 do { (x) = __raw_local_save_flags(); } while (0)
20
21/* 19/*
22 * This will restore ALL status register flags, not only the interrupt 20 * This will restore ALL status register flags, not only the interrupt
23 * mask flag. 21 * mask flag.
@@ -25,44 +23,39 @@ static inline unsigned long __raw_local_save_flags(void)
25 * The empty asm statement informs the compiler of this fact while 23 * The empty asm statement informs the compiler of this fact while
26 * also serving as a barrier. 24 * also serving as a barrier.
27 */ 25 */
28static inline void raw_local_irq_restore(unsigned long flags) 26static inline void arch_local_irq_restore(unsigned long flags)
29{ 27{
30 sysreg_write(SR, flags); 28 sysreg_write(SR, flags);
31 asm volatile("" : : : "memory", "cc"); 29 asm volatile("" : : : "memory", "cc");
32} 30}
33 31
34static inline void raw_local_irq_disable(void) 32static inline void arch_local_irq_disable(void)
35{ 33{
36 asm volatile("ssrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory"); 34 asm volatile("ssrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory");
37} 35}
38 36
39static inline void raw_local_irq_enable(void) 37static inline void arch_local_irq_enable(void)
40{ 38{
41 asm volatile("csrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory"); 39 asm volatile("csrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory");
42} 40}
43 41
44static inline int raw_irqs_disabled_flags(unsigned long flags) 42static inline bool arch_irqs_disabled_flags(unsigned long flags)
45{ 43{
46 return (flags & SYSREG_BIT(GM)) != 0; 44 return (flags & SYSREG_BIT(GM)) != 0;
47} 45}
48 46
49static inline int raw_irqs_disabled(void) 47static inline bool arch_irqs_disabled(void)
50{ 48{
51 unsigned long flags = __raw_local_save_flags(); 49 return arch_irqs_disabled_flags(arch_local_save_flags());
52
53 return raw_irqs_disabled_flags(flags);
54} 50}
55 51
56static inline unsigned long __raw_local_irq_save(void) 52static inline unsigned long arch_local_irq_save(void)
57{ 53{
58 unsigned long flags = __raw_local_save_flags(); 54 unsigned long flags = arch_local_save_flags();
59 55
60 raw_local_irq_disable(); 56 arch_local_irq_disable();
61 57
62 return flags; 58 return flags;
63} 59}
64 60
65#define raw_local_irq_save(flags) \
66 do { (flags) = __raw_local_irq_save(); } while (0)
67
68#endif /* __ASM_AVR32_IRQFLAGS_H */ 61#endif /* __ASM_AVR32_IRQFLAGS_H */
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index 994d76791016..41c4d70544ef 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -218,16 +218,4 @@ static inline void hard_local_irq_restore(unsigned long flags)
 
 
 #endif /* !CONFIG_IPIPE */
-
-/*
- * Raw interface to linux/irqflags.h.
- */
-#define raw_local_save_flags(flags)	do { (flags) = arch_local_save_flags(); } while (0)
-#define raw_local_irq_save(flags)	do { (flags) = arch_local_irq_save(); } while (0)
-#define raw_local_irq_restore(flags)	arch_local_irq_restore(flags)
-#define raw_local_irq_enable()		arch_local_irq_enable()
-#define raw_local_irq_disable()		arch_local_irq_disable()
-#define raw_irqs_disabled_flags(flags)	arch_irqs_disabled_flags(flags)
-#define raw_irqs_disabled()		arch_irqs_disabled()
-
 #endif
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index 59fcdf6b0138..05b550891ce5 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -15,6 +15,7 @@
 #include <linux/kallsyms.h>
 #include <linux/err.h>
 #include <linux/fs.h>
+#include <linux/irq.h>
 #include <asm/dma.h>
 #include <asm/trace.h>
 #include <asm/fixed_code.h>
diff --git a/arch/cris/include/arch-v10/arch/irqflags.h b/arch/cris/include/arch-v10/arch/irqflags.h
new file mode 100644
index 000000000000..75ef18991240
--- /dev/null
+++ b/arch/cris/include/arch-v10/arch/irqflags.h
@@ -0,0 +1,45 @@
1#ifndef __ASM_CRIS_ARCH_IRQFLAGS_H
2#define __ASM_CRIS_ARCH_IRQFLAGS_H
3
4#include <linux/types.h>
5
6static inline unsigned long arch_local_save_flags(void)
7{
8 unsigned long flags;
9 asm volatile("move $ccr,%0" : "=rm" (flags) : : "memory");
10 return flags;
11}
12
13static inline void arch_local_irq_disable(void)
14{
15 asm volatile("di" : : : "memory");
16}
17
18static inline void arch_local_irq_enable(void)
19{
20 asm volatile("ei" : : : "memory");
21}
22
23static inline unsigned long arch_local_irq_save(void)
24{
25 unsigned long flags = arch_local_save_flags();
26 arch_local_irq_disable();
27 return flags;
28}
29
30static inline void arch_local_irq_restore(unsigned long flags)
31{
32 asm volatile("move %0,$ccr" : : "rm" (flags) : "memory");
33}
34
35static inline bool arch_irqs_disabled_flags(unsigned long flags)
36{
37 return !(flags & (1 << 5));
38}
39
40static inline bool arch_irqs_disabled(void)
41{
42 return arch_irqs_disabled_flags(arch_local_save_flags());
43}
44
45#endif /* __ASM_CRIS_ARCH_IRQFLAGS_H */
diff --git a/arch/cris/include/arch-v10/arch/system.h b/arch/cris/include/arch-v10/arch/system.h
index 4a9cd36c9e16..935fde34aa15 100644
--- a/arch/cris/include/arch-v10/arch/system.h
+++ b/arch/cris/include/arch-v10/arch/system.h
@@ -44,20 +44,4 @@ static inline unsigned long _get_base(char * addr)
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
-/* interrupt control.. */
-#define local_save_flags(x)	__asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory");
-#define local_irq_restore(x)	__asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory");
-#define local_irq_disable()	__asm__ __volatile__ ( "di" : : :"memory");
-#define local_irq_enable()	__asm__ __volatile__ ( "ei" : : :"memory");
-
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-	local_save_flags(flags);	\
-	!(flags & (1<<5));		\
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x)	__asm__ __volatile__ ("move $ccr,%0\n\tdi" : "=rm" (x) : : "memory");
-
 #endif
diff --git a/arch/cris/include/arch-v32/arch/irqflags.h b/arch/cris/include/arch-v32/arch/irqflags.h
new file mode 100644
index 000000000000..041851f8ec6f
--- /dev/null
+++ b/arch/cris/include/arch-v32/arch/irqflags.h
@@ -0,0 +1,46 @@
1#ifndef __ASM_CRIS_ARCH_IRQFLAGS_H
2#define __ASM_CRIS_ARCH_IRQFLAGS_H
3
4#include <linux/types.h>
5#include <arch/ptrace.h>
6
7static inline unsigned long arch_local_save_flags(void)
8{
9 unsigned long flags;
10 asm volatile("move $ccs,%0" : "=rm" (flags) : : "memory");
11 return flags;
12}
13
14static inline void arch_local_irq_disable(void)
15{
16 asm volatile("di" : : : "memory");
17}
18
19static inline void arch_local_irq_enable(void)
20{
21 asm volatile("ei" : : : "memory");
22}
23
24static inline unsigned long arch_local_irq_save(void)
25{
26 unsigned long flags = arch_local_save_flags();
27 arch_local_irq_disable();
28 return flags;
29}
30
31static inline void arch_local_irq_restore(unsigned long flags)
32{
33 asm volatile("move %0,$ccs" : : "rm" (flags) : "memory");
34}
35
36static inline bool arch_irqs_disabled_flags(unsigned long flags)
37{
38 return !(flags & (1 << I_CCS_BITNR));
39}
40
41static inline bool arch_irqs_disabled(void)
42{
43 return arch_irqs_disabled_flags(arch_local_save_flags());
44}
45
46#endif /* __ASM_CRIS_ARCH_IRQFLAGS_H */
diff --git a/arch/cris/include/arch-v32/arch/system.h b/arch/cris/include/arch-v32/arch/system.h
index 6ca90f1f110a..76cea99eaa60 100644
--- a/arch/cris/include/arch-v32/arch/system.h
+++ b/arch/cris/include/arch-v32/arch/system.h
@@ -44,26 +44,4 @@ static inline unsigned long rdsp(void)
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
-/* Used for interrupt control. */
-#define local_save_flags(x) \
-	__asm__ __volatile__ ("move $ccs, %0" : "=rm" (x) : : "memory");
-
-#define local_irq_restore(x) \
-	__asm__ __volatile__ ("move %0, $ccs" : : "rm" (x) : "memory");
-
-#define local_irq_disable() __asm__ __volatile__ ("di" : : : "memory");
-#define local_irq_enable() __asm__ __volatile__ ("ei" : : : "memory");
-
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-					\
-	local_save_flags(flags);	\
-	!(flags & (1 << I_CCS_BITNR));	\
-})
-
-/* Used for spinlocks, etc. */
-#define local_irq_save(x) \
-	__asm__ __volatile__ ("move $ccs, %0\n\tdi" : "=rm" (x) : : "memory");
-
 #endif /* _ASM_CRIS_ARCH_SYSTEM_H */
diff --git a/arch/cris/include/asm/irqflags.h b/arch/cris/include/asm/irqflags.h
new file mode 100644
index 000000000000..943ba5ca6d2c
--- /dev/null
+++ b/arch/cris/include/asm/irqflags.h
@@ -0,0 +1 @@
#include <arch/irqflags.h>
diff --git a/arch/cris/include/asm/system.h b/arch/cris/include/asm/system.h
index 8657b084a922..ea10592f7d75 100644
--- a/arch/cris/include/asm/system.h
+++ b/arch/cris/include/asm/system.h
@@ -1,6 +1,7 @@
 #ifndef __ASM_CRIS_SYSTEM_H
 #define __ASM_CRIS_SYSTEM_H
 
+#include <linux/irqflags.h>
 #include <arch/system.h>
 
 /* the switch_to macro calls resume, an asm function in entry.S which does the actual
diff --git a/arch/frv/include/asm/irqflags.h b/arch/frv/include/asm/irqflags.h
new file mode 100644
index 000000000000..82f0b5363f42
--- /dev/null
+++ b/arch/frv/include/asm/irqflags.h
@@ -0,0 +1,158 @@
1/* FR-V interrupt handling
2 *
3 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#ifndef _ASM_IRQFLAGS_H
13#define _ASM_IRQFLAGS_H
14
15/*
16 * interrupt flag manipulation
17 * - use virtual interrupt management since touching the PSR is slow
18 * - ICC2.Z: T if interrupts virtually disabled
19 * - ICC2.C: F if interrupts really disabled
20 * - if Z==1 upon interrupt:
21 * - C is set to 0
22 * - interrupts are really disabled
23 * - entry.S returns immediately
24 * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts
25 * - if taken, the trap:
26 * - sets ICC2.C
27 * - enables interrupts
28 */
29static inline void arch_local_irq_disable(void)
30{
31 /* set Z flag, but don't change the C flag */
32 asm volatile(" andcc gr0,gr0,gr0,icc2 \n"
33 :
34 :
35 : "memory", "icc2"
36 );
37}
38
39static inline void arch_local_irq_enable(void)
40{
41 /* clear Z flag and then test the C flag */
42 asm volatile(" oricc gr0,#1,gr0,icc2 \n"
43 " tihi icc2,gr0,#2 \n"
44 :
45 :
46 : "memory", "icc2"
47 );
48}
49
50static inline unsigned long arch_local_save_flags(void)
51{
52 unsigned long flags;
53
54 asm volatile("movsg ccr,%0"
55 : "=r"(flags)
56 :
57 : "memory");
58
59 /* shift ICC2.Z to bit 0 */
60 flags >>= 26;
61
62 /* make flags 1 if interrupts disabled, 0 otherwise */
63 return flags & 1UL;
64
65}
66
67static inline unsigned long arch_local_irq_save(void)
68{
69 unsigned long flags = arch_local_save_flags();
70 arch_local_irq_disable();
71 return flags;
72}
73
74static inline void arch_local_irq_restore(unsigned long flags)
75{
76 /* load the Z flag by turning 1 if disabled into 0 if disabled
77 * and thus setting the Z flag but not the C flag */
78 asm volatile(" xoricc %0,#1,gr0,icc2 \n"
79 /* then trap if Z=0 and C=0 */
80 " tihi icc2,gr0,#2 \n"
81 :
82 : "r"(flags)
83 : "memory", "icc2"
84 );
85
86}
87
88static inline bool arch_irqs_disabled_flags(unsigned long flags)
89{
90 return flags;
91}
92
93static inline bool arch_irqs_disabled(void)
94{
95 return arch_irqs_disabled_flags(arch_local_save_flags());
96}
97
98/*
99 * real interrupt flag manipulation
100 */
101#define __arch_local_irq_disable() \
102do { \
103 unsigned long psr; \
104 asm volatile(" movsg psr,%0 \n" \
105 " andi %0,%2,%0 \n" \
106 " ori %0,%1,%0 \n" \
107 " movgs %0,psr \n" \
108 : "=r"(psr) \
109 : "i" (PSR_PIL_14), "i" (~PSR_PIL) \
110 : "memory"); \
111} while (0)
112
113#define __arch_local_irq_enable() \
114do { \
115 unsigned long psr; \
116 asm volatile(" movsg psr,%0 \n" \
117 " andi %0,%1,%0 \n" \
118 " movgs %0,psr \n" \
119 : "=r"(psr) \
120 : "i" (~PSR_PIL) \
121 : "memory"); \
122} while (0)
123
124#define __arch_local_save_flags(flags) \
125do { \
126 typecheck(unsigned long, flags); \
127 asm("movsg psr,%0" \
128 : "=r"(flags) \
129 : \
130 : "memory"); \
131} while (0)
132
133#define __arch_local_irq_save(flags) \
134do { \
135 unsigned long npsr; \
136 typecheck(unsigned long, flags); \
137 asm volatile(" movsg psr,%0 \n" \
138 " andi %0,%3,%1 \n" \
139 " ori %1,%2,%1 \n" \
140 " movgs %1,psr \n" \
141 : "=r"(flags), "=r"(npsr) \
142 : "i" (PSR_PIL_14), "i" (~PSR_PIL) \
143 : "memory"); \
144} while (0)
145
146#define __arch_local_irq_restore(flags) \
147do { \
148 typecheck(unsigned long, flags); \
149 asm volatile(" movgs %0,psr \n" \
150 : \
151 : "r" (flags) \
152 : "memory"); \
153} while (0)
154
155#define __arch_irqs_disabled() \
156 ((__get_PSR() & PSR_PIL) >= PSR_PIL_14)
157
158#endif /* _ASM_IRQFLAGS_H */
diff --git a/arch/frv/include/asm/system.h b/arch/frv/include/asm/system.h
index efd22d9077ac..0a6d8d9ca45b 100644
--- a/arch/frv/include/asm/system.h
+++ b/arch/frv/include/asm/system.h
@@ -37,142 +37,6 @@ do { \
37} while(0) 37} while(0)
38 38
39/* 39/*
40 * interrupt flag manipulation
41 * - use virtual interrupt management since touching the PSR is slow
42 * - ICC2.Z: T if interrupts virtually disabled
43 * - ICC2.C: F if interrupts really disabled
44 * - if Z==1 upon interrupt:
45 * - C is set to 0
46 * - interrupts are really disabled
47 * - entry.S returns immediately
48 * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts
49 * - if taken, the trap:
50 * - sets ICC2.C
51 * - enables interrupts
52 */
53#define local_irq_disable() \
54do { \
55 /* set Z flag, but don't change the C flag */ \
56 asm volatile(" andcc gr0,gr0,gr0,icc2 \n" \
57 : \
58 : \
59 : "memory", "icc2" \
60 ); \
61} while(0)
62
63#define local_irq_enable() \
64do { \
65 /* clear Z flag and then test the C flag */ \
66 asm volatile(" oricc gr0,#1,gr0,icc2 \n" \
67 " tihi icc2,gr0,#2 \n" \
68 : \
69 : \
70 : "memory", "icc2" \
71 ); \
72} while(0)
73
74#define local_save_flags(flags) \
75do { \
76 typecheck(unsigned long, flags); \
77 asm volatile("movsg ccr,%0" \
78 : "=r"(flags) \
79 : \
80 : "memory"); \
81 \
82 /* shift ICC2.Z to bit 0 */ \
83 flags >>= 26; \
84 \
85 /* make flags 1 if interrupts disabled, 0 otherwise */ \
86 flags &= 1UL; \
87} while(0)
88
89#define irqs_disabled() \
90 ({unsigned long flags; local_save_flags(flags); !!flags; })
91
92#define local_irq_save(flags) \
93do { \
94 typecheck(unsigned long, flags); \
95 local_save_flags(flags); \
96 local_irq_disable(); \
97} while(0)
98
99#define local_irq_restore(flags) \
100do { \
101 typecheck(unsigned long, flags); \
102 \
103 /* load the Z flag by turning 1 if disabled into 0 if disabled \
104 * and thus setting the Z flag but not the C flag */ \
105 asm volatile(" xoricc %0,#1,gr0,icc2 \n" \
106 /* then test Z=0 and C=0 */ \
107 " tihi icc2,gr0,#2 \n" \
108 : \
109 : "r"(flags) \
110 : "memory", "icc2" \
111 ); \
112 \
113} while(0)
114
115/*
116 * real interrupt flag manipulation
117 */
118#define __local_irq_disable() \
119do { \
120 unsigned long psr; \
121 asm volatile(" movsg psr,%0 \n" \
122 " andi %0,%2,%0 \n" \
123 " ori %0,%1,%0 \n" \
124 " movgs %0,psr \n" \
125 : "=r"(psr) \
126 : "i" (PSR_PIL_14), "i" (~PSR_PIL) \
127 : "memory"); \
128} while(0)
129
130#define __local_irq_enable() \
131do { \
132 unsigned long psr; \
133 asm volatile(" movsg psr,%0 \n" \
134 " andi %0,%1,%0 \n" \
135 " movgs %0,psr \n" \
136 : "=r"(psr) \
137 : "i" (~PSR_PIL) \
138 : "memory"); \
139} while(0)
140
141#define __local_save_flags(flags) \
142do { \
143 typecheck(unsigned long, flags); \
144 asm("movsg psr,%0" \
145 : "=r"(flags) \
146 : \
147 : "memory"); \
148} while(0)
149
150#define __local_irq_save(flags) \
151do { \
152 unsigned long npsr; \
153 typecheck(unsigned long, flags); \
154 asm volatile(" movsg psr,%0 \n" \
155 " andi %0,%3,%1 \n" \
156 " ori %1,%2,%1 \n" \
157 " movgs %1,psr \n" \
158 : "=r"(flags), "=r"(npsr) \
159 : "i" (PSR_PIL_14), "i" (~PSR_PIL) \
160 : "memory"); \
161} while(0)
162
163#define __local_irq_restore(flags) \
164do { \
165 typecheck(unsigned long, flags); \
166 asm volatile(" movgs %0,psr \n" \
167 : \
168 : "r" (flags) \
169 : "memory"); \
170} while(0)
171
172#define __irqs_disabled() \
173 ((__get_PSR() & PSR_PIL) >= PSR_PIL_14)
174
175/*
176 * Force strict CPU ordering. 40 * Force strict CPU ordering.
177 */ 41 */
178#define nop() asm volatile ("nop"::) 42#define nop() asm volatile ("nop"::)
diff --git a/arch/h8300/include/asm/irqflags.h b/arch/h8300/include/asm/irqflags.h
new file mode 100644
index 000000000000..9617cd57aebd
--- /dev/null
+++ b/arch/h8300/include/asm/irqflags.h
@@ -0,0 +1,43 @@
1#ifndef _H8300_IRQFLAGS_H
2#define _H8300_IRQFLAGS_H
3
4static inline unsigned long arch_local_save_flags(void)
5{
6 unsigned long flags;
7 asm volatile ("stc ccr,%w0" : "=r" (flags));
8 return flags;
9}
10
11static inline void arch_local_irq_disable(void)
12{
13 asm volatile ("orc #0x80,ccr" : : : "memory");
14}
15
16static inline void arch_local_irq_enable(void)
17{
18 asm volatile ("andc #0x7f,ccr" : : : "memory");
19}
20
21static inline unsigned long arch_local_irq_save(void)
22{
23 unsigned long flags = arch_local_save_flags();
24 arch_local_irq_disable();
25 return flags;
26}
27
28static inline void arch_local_irq_restore(unsigned long flags)
29{
30 asm volatile ("ldc %w0,ccr" : : "r" (flags) : "memory");
31}
32
33static inline bool arch_irqs_disabled_flags(unsigned long flags)
34{
35 return (flags & 0x80) == 0x80;
36}
37
38static inline bool arch_irqs_disabled(void)
39{
40 return arch_irqs_disabled_flags(arch_local_save_flags());
41}
42
43#endif /* _H8300_IRQFLAGS_H */
diff --git a/arch/h8300/include/asm/system.h b/arch/h8300/include/asm/system.h
index 16bf1560ff68..2c2382e50d93 100644
--- a/arch/h8300/include/asm/system.h
+++ b/arch/h8300/include/asm/system.h
@@ -2,6 +2,7 @@
 #define _H8300_SYSTEM_H
 
 #include <linux/linkage.h>
+#include <linux/irqflags.h>
 
 struct pt_regs;
 
@@ -51,31 +52,8 @@ asmlinkage void resume(void);
 	(last) = _last;					\
 }
 
-#define __sti() asm volatile ("andc #0x7f,ccr")
-#define __cli() asm volatile ("orc #0x80,ccr")
-
-#define __save_flags(x) \
-	asm volatile ("stc ccr,%w0":"=r" (x))
-
-#define __restore_flags(x) \
-	asm volatile ("ldc %w0,ccr": :"r" (x))
-
-#define irqs_disabled()			\
-({					\
-	unsigned char flags;		\
-	__save_flags(flags);		\
-	((flags & 0x80) == 0x80);	\
-})
-
 #define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
 
-/* For spinlocks etc */
-#define local_irq_disable()	__cli()
-#define local_irq_enable()	__sti()
-#define local_irq_save(x)	({ __save_flags(x); local_irq_disable(); })
-#define local_irq_restore(x)	__restore_flags(x)
-#define local_save_flags(x)	__save_flags(x)
-
 /*
  * Force strict CPU ordering.
  * Not really required on H8...
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
new file mode 100644
index 000000000000..f82d6be2ecd2
--- /dev/null
+++ b/arch/ia64/include/asm/irqflags.h
@@ -0,0 +1,94 @@
1/*
2 * IRQ flags defines.
3 *
4 * Copyright (C) 1998-2003 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
7 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
8 */
9
10#ifndef _ASM_IA64_IRQFLAGS_H
11#define _ASM_IA64_IRQFLAGS_H
12
13#ifdef CONFIG_IA64_DEBUG_IRQ
14extern unsigned long last_cli_ip;
15static inline void arch_maybe_save_ip(unsigned long flags)
16{
17 if (flags & IA64_PSR_I)
18 last_cli_ip = ia64_getreg(_IA64_REG_IP);
19}
20#else
21#define arch_maybe_save_ip(flags) do {} while (0)
22#endif
23
24/*
25 * - clearing psr.i is implicitly serialized (visible by next insn)
26 * - setting psr.i requires data serialization
27 * - we need a stop-bit before reading PSR because we sometimes
28 * write a floating-point register right before reading the PSR
29 * and that writes to PSR.mfl
30 */
31
32static inline unsigned long arch_local_save_flags(void)
33{
34 ia64_stop();
35#ifdef CONFIG_PARAVIRT
36 return ia64_get_psr_i();
37#else
38 return ia64_getreg(_IA64_REG_PSR);
39#endif
40}
41
42static inline unsigned long arch_local_irq_save(void)
43{
44 unsigned long flags = arch_local_save_flags();
45
46 ia64_stop();
47 ia64_rsm(IA64_PSR_I);
48 arch_maybe_save_ip(flags);
49 return flags;
50}
51
52static inline void arch_local_irq_disable(void)
53{
54#ifdef CONFIG_IA64_DEBUG_IRQ
55 arch_local_irq_save();
56#else
57 ia64_stop();
58 ia64_rsm(IA64_PSR_I);
59#endif
60}
61
62static inline void arch_local_irq_enable(void)
63{
64 ia64_stop();
65 ia64_ssm(IA64_PSR_I);
66 ia64_srlz_d();
67}
68
69static inline void arch_local_irq_restore(unsigned long flags)
70{
71#ifdef CONFIG_IA64_DEBUG_IRQ
72 unsigned long old_psr = arch_local_save_flags();
73#endif
74 ia64_intrin_local_irq_restore(flags & IA64_PSR_I);
75 arch_maybe_save_ip(old_psr & ~flags);
76}
77
78static inline bool arch_irqs_disabled_flags(unsigned long flags)
79{
80 return (flags & IA64_PSR_I) == 0;
81}
82
83static inline bool arch_irqs_disabled(void)
84{
85 return arch_irqs_disabled_flags(arch_local_save_flags());
86}
87
88static inline void arch_safe_halt(void)
89{
90 ia64_pal_halt_light(); /* PAL_HALT_LIGHT */
91}
92
93
94#endif /* _ASM_IA64_IRQFLAGS_H */
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
index 9f342a574ce8..2feb7f64c035 100644
--- a/arch/ia64/include/asm/system.h
+++ b/arch/ia64/include/asm/system.h
@@ -107,87 +107,11 @@ extern struct ia64_boot_param {
107 */ 107 */
108#define set_mb(var, value) do { (var) = (value); mb(); } while (0) 108#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
109 109
110#define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */
111
112/* 110/*
113 * The group barrier in front of the rsm & ssm are necessary to ensure 111 * The group barrier in front of the rsm & ssm are necessary to ensure
114 * that none of the previous instructions in the same group are 112 * that none of the previous instructions in the same group are
115 * affected by the rsm/ssm. 113 * affected by the rsm/ssm.
116 */ 114 */
117/* For spinlocks etc */
118
119/*
120 * - clearing psr.i is implicitly serialized (visible by next insn)
121 * - setting psr.i requires data serialization
122 * - we need a stop-bit before reading PSR because we sometimes
123 * write a floating-point register right before reading the PSR
124 * and that writes to PSR.mfl
125 */
126#ifdef CONFIG_PARAVIRT
127#define __local_save_flags() ia64_get_psr_i()
128#else
129#define __local_save_flags() ia64_getreg(_IA64_REG_PSR)
130#endif
131
132#define __local_irq_save(x) \
133do { \
134 ia64_stop(); \
135 (x) = __local_save_flags(); \
136 ia64_stop(); \
137 ia64_rsm(IA64_PSR_I); \
138} while (0)
139
140#define __local_irq_disable() \
141do { \
142 ia64_stop(); \
143 ia64_rsm(IA64_PSR_I); \
144} while (0)
145
146#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
147
148#ifdef CONFIG_IA64_DEBUG_IRQ
149
150 extern unsigned long last_cli_ip;
151
152# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP)
153
154# define local_irq_save(x) \
155do { \
156 unsigned long __psr; \
157 \
158 __local_irq_save(__psr); \
159 if (__psr & IA64_PSR_I) \
160 __save_ip(); \
161 (x) = __psr; \
162} while (0)
163
164# define local_irq_disable() do { unsigned long __x; local_irq_save(__x); } while (0)
165
166# define local_irq_restore(x) \
167do { \
168 unsigned long __old_psr, __psr = (x); \
169 \
170 local_save_flags(__old_psr); \
171 __local_irq_restore(__psr); \
172 if ((__old_psr & IA64_PSR_I) && !(__psr & IA64_PSR_I)) \
173 __save_ip(); \
174} while (0)
175
176#else /* !CONFIG_IA64_DEBUG_IRQ */
177# define local_irq_save(x) __local_irq_save(x)
178# define local_irq_disable() __local_irq_disable()
179# define local_irq_restore(x) __local_irq_restore(x)
180#endif /* !CONFIG_IA64_DEBUG_IRQ */
181
182#define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
183#define local_save_flags(flags) ({ ia64_stop(); (flags) = __local_save_flags(); })
184
185#define irqs_disabled() \
186({ \
187 unsigned long __ia64_id_flags; \
188 local_save_flags(__ia64_id_flags); \
189 (__ia64_id_flags & IA64_PSR_I) == 0; \
190})
191 115
192#ifdef __KERNEL__ 116#ifdef __KERNEL__
193 117
diff --git a/arch/m32r/include/asm/irqflags.h b/arch/m32r/include/asm/irqflags.h
new file mode 100644
index 000000000000..1f92d29982ae
--- /dev/null
+++ b/arch/m32r/include/asm/irqflags.h
@@ -0,0 +1,104 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
7 * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
8 */
9
10#ifndef _ASM_M32R_IRQFLAGS_H
11#define _ASM_M32R_IRQFLAGS_H
12
13#include <linux/types.h>
14
15static inline unsigned long arch_local_save_flags(void)
16{
17 unsigned long flags;
18 asm volatile("mvfc %0,psw" : "=r"(flags));
19 return flags;
20}
21
22static inline void arch_local_irq_disable(void)
23{
24#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
25 asm volatile (
26 "clrpsw #0x40 -> nop"
27 : : : "memory");
28#else
29 unsigned long tmpreg0, tmpreg1;
30 asm volatile (
31 "ld24 %0, #0 ; Use 32-bit insn. \n\t"
32 "mvfc %1, psw ; No interrupt can be accepted here. \n\t"
33 "mvtc %0, psw \n\t"
34 "and3 %0, %1, #0xffbf \n\t"
35 "mvtc %0, psw \n\t"
36 : "=&r" (tmpreg0), "=&r" (tmpreg1)
37 :
38 : "cbit", "memory");
39#endif
40}
41
42static inline void arch_local_irq_enable(void)
43{
44#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
45 asm volatile (
46 "setpsw #0x40 -> nop"
47 : : : "memory");
48#else
49 unsigned long tmpreg;
50 asm volatile (
51 "mvfc %0, psw; \n\t"
52 "or3 %0, %0, #0x0040; \n\t"
53 "mvtc %0, psw; \n\t"
54 : "=&r" (tmpreg)
55 :
56 : "cbit", "memory");
57#endif
58}
59
60static inline unsigned long arch_local_irq_save(void)
61{
62 unsigned long flags;
63
64#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
65 asm volatile (
66 "mvfc %0, psw; \n\t"
67 "clrpsw #0x40 -> nop; \n\t"
68 : "=r" (flags)
69 :
70 : "memory");
71#else
72 unsigned long tmpreg;
73 asm volatile (
74 "ld24 %1, #0 \n\t"
75 "mvfc %0, psw \n\t"
76 "mvtc %1, psw \n\t"
77 "and3 %1, %0, #0xffbf \n\t"
78 "mvtc %1, psw \n\t"
79 : "=r" (flags), "=&r" (tmpreg)
80 :
81 : "cbit", "memory");
82#endif
83 return flags;
84}
85
86static inline void arch_local_irq_restore(unsigned long flags)
87{
88 asm volatile("mvtc %0,psw"
89 :
90 : "r" (flags)
91 : "cbit", "memory");
92}
93
94static inline bool arch_irqs_disabled_flags(unsigned long flags)
95{
96 return !(flags & 0x40);
97}
98
99static inline bool arch_irqs_disabled(void)
100{
101 return arch_irqs_disabled_flags(arch_local_save_flags());
102}
103
104#endif /* _ASM_M32R_IRQFLAGS_H */
diff --git a/arch/m32r/include/asm/system.h b/arch/m32r/include/asm/system.h
index c980f5ba8de7..13c46794ccb1 100644
--- a/arch/m32r/include/asm/system.h
+++ b/arch/m32r/include/asm/system.h
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/compiler.h> 13#include <linux/compiler.h>
14#include <linux/irqflags.h>
14#include <asm/assembler.h> 15#include <asm/assembler.h>
15 16
16#ifdef __KERNEL__ 17#ifdef __KERNEL__
@@ -54,71 +55,6 @@
54 ); \ 55 ); \
55} while(0) 56} while(0)
56 57
57/* Interrupt Control */
58#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
59#define local_irq_enable() \
60 __asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
61#define local_irq_disable() \
62 __asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
63#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
64static inline void local_irq_enable(void)
65{
66 unsigned long tmpreg;
67 __asm__ __volatile__(
68 "mvfc %0, psw; \n\t"
69 "or3 %0, %0, #0x0040; \n\t"
70 "mvtc %0, psw; \n\t"
71 : "=&r" (tmpreg) : : "cbit", "memory");
72}
73
74static inline void local_irq_disable(void)
75{
76 unsigned long tmpreg0, tmpreg1;
77 __asm__ __volatile__(
78 "ld24 %0, #0 ; Use 32-bit insn. \n\t"
79 "mvfc %1, psw ; No interrupt can be accepted here. \n\t"
80 "mvtc %0, psw \n\t"
81 "and3 %0, %1, #0xffbf \n\t"
82 "mvtc %0, psw \n\t"
83 : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
84}
85#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
86
87#define local_save_flags(x) \
88 __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)
89
90#define local_irq_restore(x) \
91 __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
92 : "r" (x) : "cbit", "memory")
93
94#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
95#define local_irq_save(x) \
96 __asm__ __volatile__( \
97 "mvfc %0, psw; \n\t" \
98 "clrpsw #0x40 -> nop; \n\t" \
99 : "=r" (x) : /* no input */ : "memory")
100#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
101#define local_irq_save(x) \
102 ({ \
103 unsigned long tmpreg; \
104 __asm__ __volatile__( \
105 "ld24 %1, #0 \n\t" \
106 "mvfc %0, psw \n\t" \
107 "mvtc %1, psw \n\t" \
108 "and3 %1, %0, #0xffbf \n\t" \
109 "mvtc %1, psw \n\t" \
110 : "=r" (x), "=&r" (tmpreg) \
111 : : "cbit", "memory"); \
112 })
113#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
114
115#define irqs_disabled() \
116 ({ \
117 unsigned long flags; \
118 local_save_flags(flags); \
119 !(flags & 0x40); \
120 })
121
122#define nop() __asm__ __volatile__ ("nop" : : ) 58#define nop() __asm__ __volatile__ ("nop" : : )
123 59
124#define xchg(ptr, x) \ 60#define xchg(ptr, x) \
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 907ed03d792f..80e41492aa2a 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -28,7 +28,7 @@
  * M68K COLDFIRE
  */
 
-#define ALLOWINT 0xf8ff
+#define ALLOWINT (~0x700)
 
 #ifdef __ASSEMBLY__
 
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h
new file mode 100644
index 000000000000..4a5b284a1550
--- /dev/null
+++ b/arch/m68k/include/asm/irqflags.h
@@ -0,0 +1,76 @@
1#ifndef _M68K_IRQFLAGS_H
2#define _M68K_IRQFLAGS_H
3
4#include <linux/types.h>
5#include <linux/hardirq.h>
6#include <linux/preempt.h>
7#include <asm/thread_info.h>
8#include <asm/entry.h>
9
10static inline unsigned long arch_local_save_flags(void)
11{
12 unsigned long flags;
13 asm volatile ("movew %%sr,%0" : "=d" (flags) : : "memory");
14 return flags;
15}
16
17static inline void arch_local_irq_disable(void)
18{
19#ifdef CONFIG_COLDFIRE
20 asm volatile (
21 "move %/sr,%%d0 \n\t"
22 "ori.l #0x0700,%%d0 \n\t"
23 "move %%d0,%/sr \n"
24 : /* no outputs */
25 :
26 : "cc", "%d0", "memory");
27#else
28 asm volatile ("oriw #0x0700,%%sr" : : : "memory");
29#endif
30}
31
32static inline void arch_local_irq_enable(void)
33{
34#if defined(CONFIG_COLDFIRE)
35 asm volatile (
36 "move %/sr,%%d0 \n\t"
37 "andi.l #0xf8ff,%%d0 \n\t"
38 "move %%d0,%/sr \n"
39 : /* no outputs */
40 :
41 : "cc", "%d0", "memory");
42#else
43# if defined(CONFIG_MMU)
44 if (MACH_IS_Q40 || !hardirq_count())
45# endif
46 asm volatile (
47 "andiw %0,%%sr"
48 :
49 : "i" (ALLOWINT)
50 : "memory");
51#endif
52}
53
54static inline unsigned long arch_local_irq_save(void)
55{
56 unsigned long flags = arch_local_save_flags();
57 arch_local_irq_disable();
58 return flags;
59}
60
61static inline void arch_local_irq_restore(unsigned long flags)
62{
63 asm volatile ("movew %0,%%sr" : : "d" (flags) : "memory");
64}
65
66static inline bool arch_irqs_disabled_flags(unsigned long flags)
67{
68 return (flags & ~ALLOWINT) != 0;
69}
70
71static inline bool arch_irqs_disabled(void)
72{
73 return arch_irqs_disabled_flags(arch_local_save_flags());
74}
75
76#endif /* _M68K_IRQFLAGS_H */
diff --git a/arch/m68k/include/asm/system_mm.h b/arch/m68k/include/asm/system_mm.h
index dbb6515ffd5b..12053c44cccf 100644
--- a/arch/m68k/include/asm/system_mm.h
+++ b/arch/m68k/include/asm/system_mm.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/irqflags.h>
6#include <asm/segment.h> 7#include <asm/segment.h>
7#include <asm/entry.h> 8#include <asm/entry.h>
8 9
@@ -62,30 +63,6 @@ asmlinkage void resume(void);
62#define smp_wmb() barrier() 63#define smp_wmb() barrier()
63#define smp_read_barrier_depends() ((void)0) 64#define smp_read_barrier_depends() ((void)0)
64 65
65/* interrupt control.. */
66#if 0
67#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
68#else
69#include <linux/hardirq.h>
70#define local_irq_enable() ({ \
71 if (MACH_IS_Q40 || !hardirq_count()) \
72 asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
73})
74#endif
75#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
76#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
77#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")
78
79static inline int irqs_disabled(void)
80{
81 unsigned long flags;
82 local_save_flags(flags);
83 return flags & ~ALLOWINT;
84}
85
86/* For spinlocks etc */
87#define local_irq_save(x) ({ local_save_flags(x); local_irq_disable(); })
88
89#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 66#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
90 67
91struct __xchg_dummy { unsigned long a[100]; }; 68struct __xchg_dummy { unsigned long a[100]; };
diff --git a/arch/m68k/include/asm/system_no.h b/arch/m68k/include/asm/system_no.h
index 3c0718d74398..20126c09794e 100644
--- a/arch/m68k/include/asm/system_no.h
+++ b/arch/m68k/include/asm/system_no.h
@@ -2,6 +2,7 @@
2#define _M68KNOMMU_SYSTEM_H 2#define _M68KNOMMU_SYSTEM_H
3 3
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5#include <linux/irqflags.h>
5#include <asm/segment.h> 6#include <asm/segment.h>
6#include <asm/entry.h> 7#include <asm/entry.h>
7 8
@@ -46,54 +47,6 @@ asmlinkage void resume(void);
46 (last) = _last; \ 47 (last) = _last; \
47} 48}
48 49
49#ifdef CONFIG_COLDFIRE
50#define local_irq_enable() __asm__ __volatile__ ( \
51 "move %/sr,%%d0\n\t" \
52 "andi.l #0xf8ff,%%d0\n\t" \
53 "move %%d0,%/sr\n" \
54 : /* no outputs */ \
55 : \
56 : "cc", "%d0", "memory")
57#define local_irq_disable() __asm__ __volatile__ ( \
58 "move %/sr,%%d0\n\t" \
59 "ori.l #0x0700,%%d0\n\t" \
60 "move %%d0,%/sr\n" \
61 : /* no outputs */ \
62 : \
63 : "cc", "%d0", "memory")
64/* For spinlocks etc */
65#define local_irq_save(x) __asm__ __volatile__ ( \
66 "movew %%sr,%0\n\t" \
67 "movew #0x0700,%%d0\n\t" \
68 "or.l %0,%%d0\n\t" \
69 "movew %%d0,%/sr" \
70 : "=d" (x) \
71 : \
72 : "cc", "%d0", "memory")
73#else
74
75/* portable version */ /* FIXME - see entry.h*/
76#define ALLOWINT 0xf8ff
77
78#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
79#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
80#endif
81
82#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
83#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")
84
85/* For spinlocks etc */
86#ifndef local_irq_save
87#define local_irq_save(x) do { local_save_flags(x); local_irq_disable(); } while (0)
88#endif
89
90#define irqs_disabled() \
91({ \
92 unsigned long flags; \
93 local_save_flags(flags); \
94 ((flags & 0x0700) == 0x0700); \
95})
96
97#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") 50#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
98 51
99/* 52/*
@@ -206,12 +159,4 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
206#define arch_align_stack(x) (x) 159#define arch_align_stack(x) (x)
207 160
208 161
209static inline int irqs_disabled_flags(unsigned long flags)
210{
211 if (flags & 0x0700)
212 return 0;
213 else
214 return 1;
215}
216
217#endif /* _M68KNOMMU_SYSTEM_H */ 162#endif /* _M68KNOMMU_SYSTEM_H */
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index 9a8876f715d8..24335022fa2c 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -74,8 +74,6 @@ int main(void)
 
 	DEFINE(PT_PTRACED, PT_PTRACED);
 
-	DEFINE(THREAD_SIZE, THREAD_SIZE);
-
 	/* Offsets in thread_info structure */
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
diff --git a/arch/m68knommu/platform/coldfire/head.S b/arch/m68knommu/platform/coldfire/head.S
index 4b91aa24eb00..0b2d7c7adf79 100644
--- a/arch/m68knommu/platform/coldfire/head.S
+++ b/arch/m68knommu/platform/coldfire/head.S
@@ -15,6 +15,7 @@
 #include <asm/coldfire.h>
 #include <asm/mcfcache.h>
 #include <asm/mcfsim.h>
+#include <asm/thread_info.h>
 
 /*****************************************************************************/
 
diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h
index 2c38c6d80176..5fd31905775d 100644
--- a/arch/microblaze/include/asm/irqflags.h
+++ b/arch/microblaze/include/asm/irqflags.h
@@ -9,103 +9,114 @@
9#ifndef _ASM_MICROBLAZE_IRQFLAGS_H 9#ifndef _ASM_MICROBLAZE_IRQFLAGS_H
10#define _ASM_MICROBLAZE_IRQFLAGS_H 10#define _ASM_MICROBLAZE_IRQFLAGS_H
11 11
12#include <linux/irqflags.h> 12#include <linux/types.h>
13#include <asm/registers.h> 13#include <asm/registers.h>
14 14
15# if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 15#ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
16 16
17# define raw_local_irq_save(flags) \ 17static inline unsigned long arch_local_irq_save(void)
18 do { \ 18{
19 asm volatile (" msrclr %0, %1; \ 19 unsigned long flags;
20 nop;" \ 20 asm volatile(" msrclr %0, %1 \n"
21 : "=r"(flags) \ 21 " nop \n"
22 : "i"(MSR_IE) \ 22 : "=r"(flags)
23 : "memory"); \ 23 : "i"(MSR_IE)
24 } while (0) 24 : "memory");
25 25 return flags;
26# define raw_local_irq_disable() \ 26}
27 do { \ 27
28 asm volatile (" msrclr r0, %0; \ 28static inline void arch_local_irq_disable(void)
29 nop;" \ 29{
30 : \ 30 /* this uses r0 without declaring it - is that correct? */
31 : "i"(MSR_IE) \ 31 asm volatile(" msrclr r0, %0 \n"
32 : "memory"); \ 32 " nop \n"
33 } while (0) 33 :
34 34 : "i"(MSR_IE)
35# define raw_local_irq_enable() \ 35 : "memory");
36 do { \ 36}
37 asm volatile (" msrset r0, %0; \ 37
38 nop;" \ 38static inline void arch_local_irq_enable(void)
39 : \ 39{
40 : "i"(MSR_IE) \ 40 /* this uses r0 without declaring it - is that correct? */
41 : "memory"); \ 41 asm volatile(" msrset r0, %0 \n"
42 } while (0) 42 " nop \n"
43 43 :
44# else /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR == 0 */ 44 : "i"(MSR_IE)
45 45 : "memory");
46# define raw_local_irq_save(flags) \ 46}
47 do { \ 47
48 register unsigned tmp; \ 48#else /* !CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */
49 asm volatile (" mfs %0, rmsr; \ 49
50 nop; \ 50static inline unsigned long arch_local_irq_save(void)
51 andi %1, %0, %2; \ 51{
52 mts rmsr, %1; \ 52 unsigned long flags, tmp;
53 nop;" \ 53 asm volatile (" mfs %0, rmsr \n"
54 : "=r"(flags), "=r" (tmp) \ 54 " nop \n"
55 : "i"(~MSR_IE) \ 55 " andi %1, %0, %2 \n"
56 : "memory"); \ 56 " mts rmsr, %1 \n"
57 } while (0) 57 " nop \n"
58 58 : "=r"(flags), "=r"(tmp)
59# define raw_local_irq_disable() \ 59 : "i"(~MSR_IE)
60 do { \ 60 : "memory");
61 register unsigned tmp; \ 61 return flags;
62 asm volatile (" mfs %0, rmsr; \ 62}
63 nop; \ 63
64 andi %0, %0, %1; \ 64static inline void arch_local_irq_disable(void)
65 mts rmsr, %0; \ 65{
66 nop;" \ 66 unsigned long tmp;
67 : "=r"(tmp) \ 67 asm volatile(" mfs %0, rmsr \n"
68 : "i"(~MSR_IE) \ 68 " nop \n"
69 : "memory"); \ 69 " andi %0, %0, %1 \n"
70 } while (0) 70 " mts rmsr, %0 \n"
71 71 " nop \n"
72# define raw_local_irq_enable() \ 72 : "=r"(tmp)
73 do { \ 73 : "i"(~MSR_IE)
74 register unsigned tmp; \ 74 : "memory");
75 asm volatile (" mfs %0, rmsr; \ 75}
76 nop; \ 76
77 ori %0, %0, %1; \ 77static inline void arch_local_irq_enable(void)
78 mts rmsr, %0; \ 78{
79 nop;" \ 79 unsigned long tmp;
80 : "=r"(tmp) \ 80 asm volatile(" mfs %0, rmsr \n"
81 : "i"(MSR_IE) \ 81 " nop \n"
82 : "memory"); \ 82 " ori %0, %0, %1 \n"
83 } while (0) 83 " mts rmsr, %0 \n"
84 84 " nop \n"
85# endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ 85 : "=r"(tmp)
86 86 : "i"(MSR_IE)
87#define raw_local_irq_restore(flags) \ 87 : "memory");
88 do { \ 88}
89 asm volatile (" mts rmsr, %0; \ 89
90 nop;" \ 90#endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */
91 : \ 91
92 : "r"(flags) \ 92static inline unsigned long arch_local_save_flags(void)
93 : "memory"); \
94 } while (0)
95
96static inline unsigned long get_msr(void)
97{ 93{
98 unsigned long flags; 94 unsigned long flags;
99 asm volatile (" mfs %0, rmsr; \ 95 asm volatile(" mfs %0, rmsr \n"
100 nop;" \ 96 " nop \n"
101 : "=r"(flags) \ 97 : "=r"(flags)
102 : \ 98 :
103 : "memory"); \ 99 : "memory");
104 return flags; 100 return flags;
105} 101}
106 102
107#define raw_local_save_flags(flags) ((flags) = get_msr()) 103static inline void arch_local_irq_restore(unsigned long flags)
108#define raw_irqs_disabled() ((get_msr() & MSR_IE) == 0) 104{
109#define raw_irqs_disabled_flags(flags) ((flags & MSR_IE) == 0) 105 asm volatile(" mts rmsr, %0 \n"
106 " nop \n"
107 :
108 : "r"(flags)
109 : "memory");
110}
111
112static inline bool arch_irqs_disabled_flags(unsigned long flags)
113{
114 return (flags & MSR_IE) == 0;
115}
116
117static inline bool arch_irqs_disabled(void)
118{
119 return arch_irqs_disabled_flags(arch_local_save_flags());
120}
110 121
111#endif /* _ASM_MICROBLAZE_IRQFLAGS_H */ 122#endif /* _ASM_MICROBLAZE_IRQFLAGS_H */
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 701ec0ba8fa9..9ef3b0d17896 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -17,7 +17,7 @@
17#include <asm/hazards.h> 17#include <asm/hazards.h>
18 18
19__asm__( 19__asm__(
20 " .macro raw_local_irq_enable \n" 20 " .macro arch_local_irq_enable \n"
21 " .set push \n" 21 " .set push \n"
22 " .set reorder \n" 22 " .set reorder \n"
23 " .set noat \n" 23 " .set noat \n"
@@ -40,7 +40,7 @@ __asm__(
40 40
41extern void smtc_ipi_replay(void); 41extern void smtc_ipi_replay(void);
42 42
43static inline void raw_local_irq_enable(void) 43static inline void arch_local_irq_enable(void)
44{ 44{
45#ifdef CONFIG_MIPS_MT_SMTC 45#ifdef CONFIG_MIPS_MT_SMTC
46 /* 46 /*
@@ -50,7 +50,7 @@ static inline void raw_local_irq_enable(void)
50 smtc_ipi_replay(); 50 smtc_ipi_replay();
51#endif 51#endif
52 __asm__ __volatile__( 52 __asm__ __volatile__(
53 "raw_local_irq_enable" 53 "arch_local_irq_enable"
54 : /* no outputs */ 54 : /* no outputs */
55 : /* no inputs */ 55 : /* no inputs */
56 : "memory"); 56 : "memory");
@@ -76,7 +76,7 @@ static inline void raw_local_irq_enable(void)
76 * Workaround: mask EXL bit of the result or place a nop before mfc0. 76 * Workaround: mask EXL bit of the result or place a nop before mfc0.
77 */ 77 */
78__asm__( 78__asm__(
79 " .macro raw_local_irq_disable\n" 79 " .macro arch_local_irq_disable\n"
80 " .set push \n" 80 " .set push \n"
81 " .set noat \n" 81 " .set noat \n"
82#ifdef CONFIG_MIPS_MT_SMTC 82#ifdef CONFIG_MIPS_MT_SMTC
@@ -97,17 +97,17 @@ __asm__(
97 " .set pop \n" 97 " .set pop \n"
98 " .endm \n"); 98 " .endm \n");
99 99
100static inline void raw_local_irq_disable(void) 100static inline void arch_local_irq_disable(void)
101{ 101{
102 __asm__ __volatile__( 102 __asm__ __volatile__(
103 "raw_local_irq_disable" 103 "arch_local_irq_disable"
104 : /* no outputs */ 104 : /* no outputs */
105 : /* no inputs */ 105 : /* no inputs */
106 : "memory"); 106 : "memory");
107} 107}
108 108
109__asm__( 109__asm__(
110 " .macro raw_local_save_flags flags \n" 110 " .macro arch_local_save_flags flags \n"
111 " .set push \n" 111 " .set push \n"
112 " .set reorder \n" 112 " .set reorder \n"
113#ifdef CONFIG_MIPS_MT_SMTC 113#ifdef CONFIG_MIPS_MT_SMTC
@@ -118,13 +118,15 @@ __asm__(
118 " .set pop \n" 118 " .set pop \n"
119 " .endm \n"); 119 " .endm \n");
120 120
121#define raw_local_save_flags(x) \ 121static inline unsigned long arch_local_save_flags(void)
122__asm__ __volatile__( \ 122{
123 "raw_local_save_flags %0" \ 123 unsigned long flags;
124 : "=r" (x)) 124 asm volatile("arch_local_save_flags %0" : "=r" (flags));
125 return flags;
126}
125 127
126__asm__( 128__asm__(
127 " .macro raw_local_irq_save result \n" 129 " .macro arch_local_irq_save result \n"
128 " .set push \n" 130 " .set push \n"
129 " .set reorder \n" 131 " .set reorder \n"
130 " .set noat \n" 132 " .set noat \n"
@@ -148,15 +150,18 @@ __asm__(
148 " .set pop \n" 150 " .set pop \n"
149 " .endm \n"); 151 " .endm \n");
150 152
151#define raw_local_irq_save(x) \ 153static inline unsigned long arch_local_irq_save(void)
152__asm__ __volatile__( \ 154{
153 "raw_local_irq_save\t%0" \ 155 unsigned long flags;
154 : "=r" (x) \ 156 asm volatile("arch_local_irq_save\t%0"
155 : /* no inputs */ \ 157 : "=r" (flags)
156 : "memory") 158 : /* no inputs */
159 : "memory");
160 return flags;
161}
157 162
158__asm__( 163__asm__(
159 " .macro raw_local_irq_restore flags \n" 164 " .macro arch_local_irq_restore flags \n"
160 " .set push \n" 165 " .set push \n"
161 " .set noreorder \n" 166 " .set noreorder \n"
162 " .set noat \n" 167 " .set noat \n"
@@ -196,7 +201,7 @@ __asm__(
196 " .endm \n"); 201 " .endm \n");
197 202
198 203
199static inline void raw_local_irq_restore(unsigned long flags) 204static inline void arch_local_irq_restore(unsigned long flags)
200{ 205{
201 unsigned long __tmp1; 206 unsigned long __tmp1;
202 207
@@ -211,24 +216,24 @@ static inline void raw_local_irq_restore(unsigned long flags)
211#endif 216#endif
212 217
213 __asm__ __volatile__( 218 __asm__ __volatile__(
214 "raw_local_irq_restore\t%0" 219 "arch_local_irq_restore\t%0"
215 : "=r" (__tmp1) 220 : "=r" (__tmp1)
216 : "0" (flags) 221 : "0" (flags)
217 : "memory"); 222 : "memory");
218} 223}
219 224
220static inline void __raw_local_irq_restore(unsigned long flags) 225static inline void __arch_local_irq_restore(unsigned long flags)
221{ 226{
222 unsigned long __tmp1; 227 unsigned long __tmp1;
223 228
224 __asm__ __volatile__( 229 __asm__ __volatile__(
225 "raw_local_irq_restore\t%0" 230 "arch_local_irq_restore\t%0"
226 : "=r" (__tmp1) 231 : "=r" (__tmp1)
227 : "0" (flags) 232 : "0" (flags)
228 : "memory"); 233 : "memory");
229} 234}
230 235
231static inline int raw_irqs_disabled_flags(unsigned long flags) 236static inline int arch_irqs_disabled_flags(unsigned long flags)
232{ 237{
233#ifdef CONFIG_MIPS_MT_SMTC 238#ifdef CONFIG_MIPS_MT_SMTC
234 /* 239 /*
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index cfeb2c155896..39c08254b0f1 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1038,7 +1038,7 @@ void deferred_smtc_ipi(void)
1038 * but it's more efficient, given that we're already 1038 * but it's more efficient, given that we're already
1039 * running down the IPI queue. 1039 * running down the IPI queue.
1040 */ 1040 */
1041 __raw_local_irq_restore(flags); 1041 __arch_local_irq_restore(flags);
1042 } 1042 }
1043} 1043}
1044 1044
@@ -1190,7 +1190,7 @@ void smtc_ipi_replay(void)
1190 /* 1190 /*
1191 ** But use a raw restore here to avoid recursion. 1191 ** But use a raw restore here to avoid recursion.
1192 */ 1192 */
1193 __raw_local_irq_restore(flags); 1193 __arch_local_irq_restore(flags);
1194 1194
1195 if (pipi) { 1195 if (pipi) {
1196 self_ipi(pipi); 1196 self_ipi(pipi);
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h
new file mode 100644
index 000000000000..5e529a117cb2
--- /dev/null
+++ b/arch/mn10300/include/asm/irqflags.h
@@ -0,0 +1,123 @@
1/* MN10300 IRQ flag handling
2 *
3 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#ifndef _ASM_IRQFLAGS_H
13#define _ASM_IRQFLAGS_H
14
15#include <asm/cpu-regs.h>
16
17/*
18 * interrupt control
19 * - "disabled": run in IM1/2
20 * - level 0 - GDB stub
21 * - level 1 - virtual serial DMA (if present)
22 * - level 5 - normal interrupt priority
23 * - level 6 - timer interrupt
24 * - "enabled": run in IM7
25 */
26#ifdef CONFIG_MN10300_TTYSM
27#define MN10300_CLI_LEVEL EPSW_IM_2
28#else
29#define MN10300_CLI_LEVEL EPSW_IM_1
30#endif
31
32#ifndef __ASSEMBLY__
33
34static inline unsigned long arch_local_save_flags(void)
35{
36 unsigned long flags;
37
38 asm volatile("mov epsw,%0" : "=d"(flags));
39 return flags;
40}
41
42static inline void arch_local_irq_disable(void)
43{
44 asm volatile(
45 " and %0,epsw \n"
46 " or %1,epsw \n"
47 " nop \n"
48 " nop \n"
49 " nop \n"
50 :
51 : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL)
52 : "memory");
53}
54
55static inline unsigned long arch_local_irq_save(void)
56{
57 unsigned long flags;
58
59 flags = arch_local_save_flags();
60 arch_local_irq_disable();
61 return flags;
62}
63
64/*
 65 * we make sure arch_local_irq_enable() doesn't cause priority inversion
66 */
67extern unsigned long __mn10300_irq_enabled_epsw;
68
69static inline void arch_local_irq_enable(void)
70{
71 unsigned long tmp;
72
73 asm volatile(
74 " mov epsw,%0 \n"
75 " and %1,%0 \n"
76 " or %2,%0 \n"
77 " mov %0,epsw \n"
78 : "=&d"(tmp)
79 : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)
80 : "memory");
81}
82
83static inline void arch_local_irq_restore(unsigned long flags)
84{
85 asm volatile(
86 " mov %0,epsw \n"
87 " nop \n"
88 " nop \n"
89 " nop \n"
90 :
91 : "d"(flags)
92 : "memory", "cc");
93}
94
95static inline bool arch_irqs_disabled_flags(unsigned long flags)
96{
97 return (flags & EPSW_IM) <= MN10300_CLI_LEVEL;
98}
99
100static inline bool arch_irqs_disabled(void)
101{
102 return arch_irqs_disabled_flags(arch_local_save_flags());
103}
104
105/*
106 * Hook to save power by halting the CPU
107 * - called from the idle loop
108 * - must reenable interrupts (which takes three instruction cycles to complete)
109 */
110static inline void arch_safe_halt(void)
111{
112 asm volatile(
113 " or %0,epsw \n"
114 " nop \n"
115 " nop \n"
116 " bset %2,(%1) \n"
117 :
118 : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
119 : "cc");
120}
121
122#endif /* __ASSEMBLY__ */
123#endif /* _ASM_IRQFLAGS_H */
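
For illustration (not part of the patch above): the new MN10300 inlines compose in the usual way — arch_local_irq_save() is arch_local_save_flags() followed by arch_local_irq_disable(), and the saved EPSW image is restored verbatim. A minimal usage sketch, assuming only the functions defined in the header above (the counter and function name are invented for the example):

	/* Sketch only: a critical section built directly on the renamed
	 * primitives.  Real kernel code would use local_irq_save() /
	 * local_irq_restore(), which wrap these and add lockdep/tracing. */
	static int example_counter;		/* hypothetical data shared with an IRQ handler */

	static void example_bump_counter(void)
	{
		unsigned long flags;

		flags = arch_local_irq_save();	/* save EPSW, mask down to MN10300_CLI_LEVEL */
		example_counter++;		/* cannot be preempted by a maskable IRQ here */
		arch_local_irq_restore(flags);	/* return to the saved interrupt mask */
	}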
diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h
index 3636c054dcd5..9f7c7e17c01e 100644
--- a/arch/mn10300/include/asm/system.h
+++ b/arch/mn10300/include/asm/system.h
@@ -17,6 +17,7 @@
17#ifndef __ASSEMBLY__ 17#ifndef __ASSEMBLY__
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/irqflags.h>
20 21
21struct task_struct; 22struct task_struct;
22struct thread_struct; 23struct thread_struct;
@@ -81,114 +82,6 @@ do { \
81 82
82/*****************************************************************************/ 83/*****************************************************************************/
83/* 84/*
84 * interrupt control
85 * - "disabled": run in IM1/2
86 * - level 0 - GDB stub
87 * - level 1 - virtual serial DMA (if present)
88 * - level 5 - normal interrupt priority
89 * - level 6 - timer interrupt
90 * - "enabled": run in IM7
91 */
92#ifdef CONFIG_MN10300_TTYSM
93#define MN10300_CLI_LEVEL EPSW_IM_2
94#else
95#define MN10300_CLI_LEVEL EPSW_IM_1
96#endif
97
98#define local_save_flags(x) \
99do { \
100 typecheck(unsigned long, x); \
101 asm volatile( \
102 " mov epsw,%0 \n" \
103 : "=d"(x) \
104 ); \
105} while (0)
106
107#define local_irq_disable() \
108do { \
109 asm volatile( \
110 " and %0,epsw \n" \
111 " or %1,epsw \n" \
112 " nop \n" \
113 " nop \n" \
114 " nop \n" \
115 : \
116 : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL) \
117 ); \
118} while (0)
119
120#define local_irq_save(x) \
121do { \
122 local_save_flags(x); \
123 local_irq_disable(); \
124} while (0)
125
126/*
127 * we make sure local_irq_enable() doesn't cause priority inversion
128 */
129#ifndef __ASSEMBLY__
130
131extern unsigned long __mn10300_irq_enabled_epsw;
132
133#endif
134
135#define local_irq_enable() \
136do { \
137 unsigned long tmp; \
138 \
139 asm volatile( \
140 " mov epsw,%0 \n" \
141 " and %1,%0 \n" \
142 " or %2,%0 \n" \
143 " mov %0,epsw \n" \
144 : "=&d"(tmp) \
145 : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw) \
146 : "cc" \
147 ); \
148} while (0)
149
150#define local_irq_restore(x) \
151do { \
152 typecheck(unsigned long, x); \
153 asm volatile( \
154 " mov %0,epsw \n" \
155 " nop \n" \
156 " nop \n" \
157 " nop \n" \
158 : \
159 : "d"(x) \
160 : "memory", "cc" \
161 ); \
162} while (0)
163
164#define irqs_disabled() \
165({ \
166 unsigned long flags; \
167 local_save_flags(flags); \
168 (flags & EPSW_IM) <= MN10300_CLI_LEVEL; \
169})
170
171/* hook to save power by halting the CPU
172 * - called from the idle loop
173 * - must reenable interrupts (which takes three instruction cycles to complete)
174 */
175#define safe_halt() \
176do { \
177 asm volatile(" or %0,epsw \n" \
178 " nop \n" \
179 " nop \n" \
180 " bset %2,(%1) \n" \
181 : \
182 : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)\
183 : "cc" \
184 ); \
185} while (0)
186
187#define STI or EPSW_IE|EPSW_IM,epsw
188#define CLI and ~EPSW_IM,epsw; or EPSW_IE|MN10300_CLI_LEVEL,epsw; nop; nop; nop
189
190/*****************************************************************************/
191/*
192 * MN10300 doesn't actually have an exchange instruction 85 * MN10300 doesn't actually have an exchange instruction
193 */ 86 */
194#ifndef __ASSEMBLY__ 87#ifndef __ASSEMBLY__
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index d9ed5a15c547..3d394b4eefba 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -16,6 +16,7 @@
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <asm/smp.h> 17#include <asm/smp.h>
18#include <asm/system.h> 18#include <asm/system.h>
19#include <asm/irqflags.h>
19#include <asm/thread_info.h> 20#include <asm/thread_info.h>
20#include <asm/intctl-regs.h> 21#include <asm/intctl-regs.h>
21#include <asm/busctl-regs.h> 22#include <asm/busctl-regs.h>
diff --git a/arch/parisc/include/asm/irqflags.h b/arch/parisc/include/asm/irqflags.h
new file mode 100644
index 000000000000..34f9cb9b4754
--- /dev/null
+++ b/arch/parisc/include/asm/irqflags.h
@@ -0,0 +1,46 @@
1#ifndef __PARISC_IRQFLAGS_H
2#define __PARISC_IRQFLAGS_H
3
4#include <linux/types.h>
5#include <asm/psw.h>
6
7static inline unsigned long arch_local_save_flags(void)
8{
9 unsigned long flags;
10 asm volatile("ssm 0, %0" : "=r" (flags) : : "memory");
11 return flags;
12}
13
14static inline void arch_local_irq_disable(void)
15{
16 asm volatile("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory");
17}
18
19static inline void arch_local_irq_enable(void)
20{
21 asm volatile("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory");
22}
23
24static inline unsigned long arch_local_irq_save(void)
25{
26 unsigned long flags;
27 asm volatile("rsm %1,%0" : "=r" (flags) : "i" (PSW_I) : "memory");
28 return flags;
29}
30
31static inline void arch_local_irq_restore(unsigned long flags)
32{
33 asm volatile("mtsm %0" : : "r" (flags) : "memory");
34}
35
36static inline bool arch_irqs_disabled_flags(unsigned long flags)
37{
38 return (flags & PSW_I) == 0;
39}
40
41static inline bool arch_irqs_disabled(void)
42{
43 return arch_irqs_disabled_flags(arch_local_save_flags());
44}
45
46#endif /* __PARISC_IRQFLAGS_H */
diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h
index 2ab4af58ecb9..b19e63a8e848 100644
--- a/arch/parisc/include/asm/system.h
+++ b/arch/parisc/include/asm/system.h
@@ -1,7 +1,7 @@
1#ifndef __PARISC_SYSTEM_H 1#ifndef __PARISC_SYSTEM_H
2#define __PARISC_SYSTEM_H 2#define __PARISC_SYSTEM_H
3 3
4#include <asm/psw.h> 4#include <linux/irqflags.h>
5 5
6/* The program status word as bitfields. */ 6/* The program status word as bitfields. */
7struct pa_psw { 7struct pa_psw {
@@ -48,23 +48,6 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
48 (last) = _switch_to(prev, next); \ 48 (last) = _switch_to(prev, next); \
49} while(0) 49} while(0)
50 50
51/* interrupt control */
52#define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
53#define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
54#define local_irq_enable() __asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
55
56#define local_irq_save(x) \
57 __asm__ __volatile__("rsm %1,%0" : "=r" (x) :"i" (PSW_I) : "memory" )
58#define local_irq_restore(x) \
59 __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" )
60
61#define irqs_disabled() \
62({ \
63 unsigned long flags; \
64 local_save_flags(flags); \
65 (flags & PSW_I) == 0; \
66})
67
68#define mfctl(reg) ({ \ 51#define mfctl(reg) ({ \
69 unsigned long cr; \ 52 unsigned long cr; \
70 __asm__ __volatile__( \ 53 __asm__ __volatile__( \
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index bd100fcf40d0..ff08b70b36d4 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -16,42 +16,57 @@ extern void timer_interrupt(struct pt_regs *);
16#ifdef CONFIG_PPC64 16#ifdef CONFIG_PPC64
17#include <asm/paca.h> 17#include <asm/paca.h>
18 18
19static inline unsigned long local_get_flags(void) 19static inline unsigned long arch_local_save_flags(void)
20{ 20{
21 unsigned long flags; 21 unsigned long flags;
22 22
23 __asm__ __volatile__("lbz %0,%1(13)" 23 asm volatile(
24 : "=r" (flags) 24 "lbz %0,%1(13)"
25 : "i" (offsetof(struct paca_struct, soft_enabled))); 25 : "=r" (flags)
26 : "i" (offsetof(struct paca_struct, soft_enabled)));
26 27
27 return flags; 28 return flags;
28} 29}
29 30
30static inline unsigned long raw_local_irq_disable(void) 31static inline unsigned long arch_local_irq_disable(void)
31{ 32{
32 unsigned long flags, zero; 33 unsigned long flags, zero;
33 34
34 __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)" 35 asm volatile(
35 : "=r" (flags), "=&r" (zero) 36 "li %1,0; lbz %0,%2(13); stb %1,%2(13)"
36 : "i" (offsetof(struct paca_struct, soft_enabled)) 37 : "=r" (flags), "=&r" (zero)
37 : "memory"); 38 : "i" (offsetof(struct paca_struct, soft_enabled))
39 : "memory");
38 40
39 return flags; 41 return flags;
40} 42}
41 43
42extern void raw_local_irq_restore(unsigned long); 44extern void arch_local_irq_restore(unsigned long);
43extern void iseries_handle_interrupts(void); 45extern void iseries_handle_interrupts(void);
44 46
45#define raw_local_irq_enable() raw_local_irq_restore(1) 47static inline void arch_local_irq_enable(void)
46#define raw_local_save_flags(flags) ((flags) = local_get_flags()) 48{
47#define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable()) 49 arch_local_irq_restore(1);
50}
51
52static inline unsigned long arch_local_irq_save(void)
53{
54 return arch_local_irq_disable();
55}
56
57static inline bool arch_irqs_disabled_flags(unsigned long flags)
58{
59 return flags == 0;
60}
48 61
49#define raw_irqs_disabled() (local_get_flags() == 0) 62static inline bool arch_irqs_disabled(void)
50#define raw_irqs_disabled_flags(flags) ((flags) == 0) 63{
64 return arch_irqs_disabled_flags(arch_local_save_flags());
65}
51 66
52#ifdef CONFIG_PPC_BOOK3E 67#ifdef CONFIG_PPC_BOOK3E
53#define __hard_irq_enable() __asm__ __volatile__("wrteei 1": : :"memory"); 68#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory");
54#define __hard_irq_disable() __asm__ __volatile__("wrteei 0": : :"memory"); 69#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory");
55#else 70#else
56#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) 71#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
57#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) 72#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
@@ -64,64 +79,66 @@ extern void iseries_handle_interrupts(void);
64 get_paca()->hard_enabled = 0; \ 79 get_paca()->hard_enabled = 0; \
65 } while(0) 80 } while(0)
66 81
67#else 82#else /* CONFIG_PPC64 */
68 83
69#if defined(CONFIG_BOOKE)
70#define SET_MSR_EE(x) mtmsr(x) 84#define SET_MSR_EE(x) mtmsr(x)
71#define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") 85
86static inline unsigned long arch_local_save_flags(void)
87{
88 return mfmsr();
89}
90
91static inline void arch_local_irq_restore(unsigned long flags)
92{
93#if defined(CONFIG_BOOKE)
94 asm volatile("wrtee %0" : : "r" (flags) : "memory");
72#else 95#else
73#define SET_MSR_EE(x) mtmsr(x) 96 mtmsr(flags);
74#define raw_local_irq_restore(flags) mtmsr(flags)
75#endif 97#endif
98}
76 99
77static inline void raw_local_irq_disable(void) 100static inline unsigned long arch_local_irq_save(void)
78{ 101{
102 unsigned long flags = arch_local_save_flags();
79#ifdef CONFIG_BOOKE 103#ifdef CONFIG_BOOKE
80 __asm__ __volatile__("wrteei 0": : :"memory"); 104 asm volatile("wrteei 0" : : : "memory");
81#else 105#else
82 unsigned long msr; 106 SET_MSR_EE(flags & ~MSR_EE);
83
84 msr = mfmsr();
85 SET_MSR_EE(msr & ~MSR_EE);
86#endif 107#endif
108 return flags;
87} 109}
88 110
89static inline void raw_local_irq_enable(void) 111static inline void arch_local_irq_disable(void)
90{ 112{
91#ifdef CONFIG_BOOKE 113#ifdef CONFIG_BOOKE
92 __asm__ __volatile__("wrteei 1": : :"memory"); 114 asm volatile("wrteei 0" : : : "memory");
93#else 115#else
94 unsigned long msr; 116 arch_local_irq_save();
95
96 msr = mfmsr();
97 SET_MSR_EE(msr | MSR_EE);
98#endif 117#endif
99} 118}
100 119
101static inline void raw_local_irq_save_ptr(unsigned long *flags) 120static inline void arch_local_irq_enable(void)
102{ 121{
103 unsigned long msr;
104 msr = mfmsr();
105 *flags = msr;
106#ifdef CONFIG_BOOKE 122#ifdef CONFIG_BOOKE
107 __asm__ __volatile__("wrteei 0": : :"memory"); 123 asm volatile("wrteei 1" : : : "memory");
108#else 124#else
109 SET_MSR_EE(msr & ~MSR_EE); 125 unsigned long msr = mfmsr();
126 SET_MSR_EE(msr | MSR_EE);
110#endif 127#endif
111} 128}
112 129
113#define raw_local_save_flags(flags) ((flags) = mfmsr()) 130static inline bool arch_irqs_disabled_flags(unsigned long flags)
114#define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags)
115#define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0)
116#define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0)
117
118#define hard_irq_disable() raw_local_irq_disable()
119
120static inline int irqs_disabled_flags(unsigned long flags)
121{ 131{
122 return (flags & MSR_EE) == 0; 132 return (flags & MSR_EE) == 0;
123} 133}
124 134
135static inline bool arch_irqs_disabled(void)
136{
137 return arch_irqs_disabled_flags(arch_local_save_flags());
138}
139
140#define hard_irq_disable() arch_local_irq_disable()
141
125#endif /* CONFIG_PPC64 */ 142#endif /* CONFIG_PPC64 */
126 143
127/* 144/*
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index 5f68ecfdf516..b85d8ddbb666 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -6,7 +6,7 @@
6 6
7#ifndef __ASSEMBLY__ 7#ifndef __ASSEMBLY__
8/* 8/*
9 * Get definitions for raw_local_save_flags(x), etc. 9 * Get definitions for arch_local_save_flags(x), etc.
10 */ 10 */
11#include <asm/hw_irq.h> 11#include <asm/hw_irq.h>
12 12
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f53029a01554..39b0c48872d2 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -818,12 +818,12 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
818 818
819 /* 819 /*
820 * hash_page couldn't handle it, set soft interrupt enable back 820 * hash_page couldn't handle it, set soft interrupt enable back
821 * to what it was before the trap. Note that .raw_local_irq_restore 821 * to what it was before the trap. Note that .arch_local_irq_restore
822 * handles any interrupts pending at this point. 822 * handles any interrupts pending at this point.
823 */ 823 */
824 ld r3,SOFTE(r1) 824 ld r3,SOFTE(r1)
825 TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) 825 TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
826 bl .raw_local_irq_restore 826 bl .arch_local_irq_restore
827 b 11f 827 b 11f
828 828
829/* We have a data breakpoint exception - handle it */ 829/* We have a data breakpoint exception - handle it */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 4a65386995d7..1903290f5469 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -116,7 +116,7 @@ static inline notrace void set_soft_enabled(unsigned long enable)
116 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); 116 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
117} 117}
118 118
119notrace void raw_local_irq_restore(unsigned long en) 119notrace void arch_local_irq_restore(unsigned long en)
120{ 120{
121 /* 121 /*
122 * get_paca()->soft_enabled = en; 122 * get_paca()->soft_enabled = en;
@@ -192,7 +192,7 @@ notrace void raw_local_irq_restore(unsigned long en)
192 192
193 __hard_irq_enable(); 193 __hard_irq_enable();
194} 194}
195EXPORT_SYMBOL(raw_local_irq_restore); 195EXPORT_SYMBOL(arch_local_irq_restore);
196#endif /* CONFIG_PPC64 */ 196#endif /* CONFIG_PPC64 */
197 197
198static int show_other_interrupts(struct seq_file *p, int prec) 198static int show_other_interrupts(struct seq_file *p, int prec)
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 15b3ac253898..865d6d891ace 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -8,8 +8,8 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10 10
11/* store then or system mask. */ 11/* store then OR system mask. */
12#define __raw_local_irq_stosm(__or) \ 12#define __arch_local_irq_stosm(__or) \
13({ \ 13({ \
14 unsigned long __mask; \ 14 unsigned long __mask; \
15 asm volatile( \ 15 asm volatile( \
@@ -18,8 +18,8 @@
18 __mask; \ 18 __mask; \
19}) 19})
20 20
21/* store then and system mask. */ 21/* store then AND system mask. */
22#define __raw_local_irq_stnsm(__and) \ 22#define __arch_local_irq_stnsm(__and) \
23({ \ 23({ \
24 unsigned long __mask; \ 24 unsigned long __mask; \
25 asm volatile( \ 25 asm volatile( \
@@ -29,39 +29,44 @@
29}) 29})
30 30
31/* set system mask. */ 31/* set system mask. */
32#define __raw_local_irq_ssm(__mask) \ 32static inline void __arch_local_irq_ssm(unsigned long flags)
33({ \ 33{
34 asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ 34 asm volatile("ssm %0" : : "Q" (flags) : "memory");
35}) 35}
36 36
37/* interrupt control.. */ 37static inline unsigned long arch_local_save_flags(void)
38static inline unsigned long raw_local_irq_enable(void)
39{ 38{
40 return __raw_local_irq_stosm(0x03); 39 return __arch_local_irq_stosm(0x00);
41} 40}
42 41
43static inline unsigned long raw_local_irq_disable(void) 42static inline unsigned long arch_local_irq_save(void)
44{ 43{
45 return __raw_local_irq_stnsm(0xfc); 44 return __arch_local_irq_stnsm(0xfc);
46} 45}
47 46
48#define raw_local_save_flags(x) \ 47static inline void arch_local_irq_disable(void)
49do { \ 48{
50 typecheck(unsigned long, x); \ 49 arch_local_irq_save();
51 (x) = __raw_local_irq_stosm(0x00); \ 50}
52} while (0)
53 51
54static inline void raw_local_irq_restore(unsigned long flags) 52static inline void arch_local_irq_enable(void)
55{ 53{
56 __raw_local_irq_ssm(flags); 54 __arch_local_irq_stosm(0x03);
57} 55}
58 56
59static inline int raw_irqs_disabled_flags(unsigned long flags) 57static inline void arch_local_irq_restore(unsigned long flags)
58{
59 __arch_local_irq_ssm(flags);
60}
61
62static inline bool arch_irqs_disabled_flags(unsigned long flags)
60{ 63{
61 return !(flags & (3UL << (BITS_PER_LONG - 8))); 64 return !(flags & (3UL << (BITS_PER_LONG - 8)));
62} 65}
63 66
64/* For spinlocks etc */ 67static inline bool arch_irqs_disabled(void)
65#define raw_local_irq_save(x) ((x) = raw_local_irq_disable()) 68{
69 return arch_irqs_disabled_flags(arch_local_save_flags());
70}
66 71
67#endif /* __ASM_IRQFLAGS_H */ 72#endif /* __ASM_IRQFLAGS_H */
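
For illustration (not part of the patch above): the new s390 inlines map directly onto the two instructions wrapped earlier in the file, so a save/restore pair behaves exactly as on any other architecture. A sketch (the function name is invented for the example):

	/* Mapping, as defined above:
	 *   arch_local_save_flags()   -> __arch_local_irq_stosm(0x00)  read the system mask
	 *   arch_local_irq_save()     -> __arch_local_irq_stnsm(0xfc)  read it and clear ext/I-O bits
	 *   arch_local_irq_enable()   -> __arch_local_irq_stosm(0x03)  set the ext/I-O bits
	 *   arch_local_irq_restore(f) -> __arch_local_irq_ssm(f)       reload the saved mask
	 */
	static void example_s390_critical_section(void)
	{
		unsigned long flags = arch_local_irq_save();	/* PSW ext and I/O bits now clear */
		/* ... code that must not be interrupted ... */
		arch_local_irq_restore(flags);
	}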
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index cef66210c846..8e8a50eeed92 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -399,7 +399,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
399static inline void 399static inline void
400__set_psw_mask(unsigned long mask) 400__set_psw_mask(unsigned long mask)
401{ 401{
402 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); 402 __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
403} 403}
404 404
405#define local_mcck_enable() __set_psw_mask(psw_kernel_bits) 405#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 559af0d07878..0fbe4e32f7ba 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -54,11 +54,11 @@ void detect_memory_layout(struct mem_chunk chunk[])
54 * right thing and we don't get scheduled away with low address 54 * right thing and we don't get scheduled away with low address
55 * protection disabled. 55 * protection disabled.
56 */ 56 */
57 flags = __raw_local_irq_stnsm(0xf8); 57 flags = __arch_local_irq_stnsm(0xf8);
58 __ctl_store(cr0, 0, 0); 58 __ctl_store(cr0, 0, 0);
59 __ctl_clear_bit(0, 28); 59 __ctl_clear_bit(0, 28);
60 find_memory_chunks(chunk); 60 find_memory_chunks(chunk);
61 __ctl_load(cr0, 0, 0); 61 __ctl_load(cr0, 0, 0);
62 __raw_local_irq_ssm(flags); 62 arch_local_irq_restore(flags);
63} 63}
64EXPORT_SYMBOL(detect_memory_layout); 64EXPORT_SYMBOL(detect_memory_layout);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 30eb6d02ddb8..94b8ba2ec857 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -50,7 +50,6 @@ EXPORT_SYMBOL(empty_zero_page);
50 */ 50 */
51void __init paging_init(void) 51void __init paging_init(void)
52{ 52{
53 static const int ssm_mask = 0x04000000L;
54 unsigned long max_zone_pfns[MAX_NR_ZONES]; 53 unsigned long max_zone_pfns[MAX_NR_ZONES];
55 unsigned long pgd_type; 54 unsigned long pgd_type;
56 55
@@ -72,7 +71,7 @@ void __init paging_init(void)
72 __ctl_load(S390_lowcore.kernel_asce, 1, 1); 71 __ctl_load(S390_lowcore.kernel_asce, 1, 1);
73 __ctl_load(S390_lowcore.kernel_asce, 7, 7); 72 __ctl_load(S390_lowcore.kernel_asce, 7, 7);
74 __ctl_load(S390_lowcore.kernel_asce, 13, 13); 73 __ctl_load(S390_lowcore.kernel_asce, 13, 13);
75 __raw_local_irq_ssm(ssm_mask); 74 arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
76 75
77 atomic_set(&init_mm.context.attach_count, 1); 76 atomic_set(&init_mm.context.attach_count, 1);
78 77
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index a8c2af8c650f..71a4b0d34be0 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -71,7 +71,7 @@ int memcpy_real(void *dest, void *src, size_t count)
71 71
72 if (!count) 72 if (!count)
73 return 0; 73 return 0;
74 flags = __raw_local_irq_stnsm(0xf8UL); 74 flags = __arch_local_irq_stnsm(0xf8UL);
75 asm volatile ( 75 asm volatile (
76 "0: mvcle %1,%2,0x0\n" 76 "0: mvcle %1,%2,0x0\n"
77 "1: jo 0b\n" 77 "1: jo 0b\n"
@@ -82,6 +82,6 @@ int memcpy_real(void *dest, void *src, size_t count)
82 "+d" (_len2), "=m" (*((long *) dest)) 82 "+d" (_len2), "=m" (*((long *) dest))
83 : "m" (*((long *) src)) 83 : "m" (*((long *) src))
84 : "cc", "memory"); 84 : "cc", "memory");
85 __raw_local_irq_ssm(flags); 85 arch_local_irq_restore(flags);
86 return rc; 86 return rc;
87} 87}
diff --git a/arch/score/include/asm/irqflags.h b/arch/score/include/asm/irqflags.h
index 690a6cae7294..5c7563891e28 100644
--- a/arch/score/include/asm/irqflags.h
+++ b/arch/score/include/asm/irqflags.h
@@ -3,107 +3,118 @@
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
6#define raw_local_irq_save(x) \ 6#include <linux/types.h>
7{ \ 7
8 __asm__ __volatile__( \ 8static inline unsigned long arch_local_save_flags(void)
9 "mfcr r8, cr0;" \ 9{
10 "li r9, 0xfffffffe;" \ 10 unsigned long flags;
11 "nop;" \ 11
12 "mv %0, r8;" \ 12 asm volatile(
13 "and r8, r8, r9;" \ 13 " mfcr r8, cr0 \n"
14 "mtcr r8, cr0;" \ 14 " nop \n"
15 "nop;" \ 15 " nop \n"
16 "nop;" \ 16 " mv %0, r8 \n"
17 "nop;" \ 17 " nop \n"
18 "nop;" \ 18 " nop \n"
19 "nop;" \ 19 " nop \n"
20 : "=r" (x) \ 20 " nop \n"
21 : \ 21 " nop \n"
22 : "r8", "r9" \ 22 " ldi r9, 0x1 \n"
23 ); \ 23 " and %0, %0, r9 \n"
24 : "=r" (flags)
25 :
26 : "r8", "r9");
27 return flags;
24} 28}
25 29
26#define raw_local_irq_restore(x) \ 30static inline unsigned long arch_local_irq_save(void)
27{ \ 31{
 28 __asm__ __volatile__( \ 32 unsigned long flags;
29 "mfcr r8, cr0;" \ 33
30 "ldi r9, 0x1;" \ 34 asm volatile(
31 "and %0, %0, r9;" \ 35 " mfcr r8, cr0 \n"
32 "or r8, r8, %0;" \ 36 " li r9, 0xfffffffe \n"
33 "mtcr r8, cr0;" \ 37 " nop \n"
34 "nop;" \ 38 " mv %0, r8 \n"
35 "nop;" \ 39 " and r8, r8, r9 \n"
36 "nop;" \ 40 " mtcr r8, cr0 \n"
37 "nop;" \ 41 " nop \n"
38 "nop;" \ 42 " nop \n"
39 : \ 43 " nop \n"
40 : "r"(x) \ 44 " nop \n"
41 : "r8", "r9" \ 45 " nop \n"
42 ); \ 46 : "=r" (flags)
47 :
48 : "r8", "r9", "memory");
49
50 return flags;
43} 51}
44 52
45#define raw_local_irq_enable(void) \ 53static inline void arch_local_irq_restore(unsigned long flags)
46{ \ 54{
47 __asm__ __volatile__( \ 55 asm volatile(
48 "mfcr\tr8,cr0;" \ 56 " mfcr r8, cr0 \n"
49 "nop;" \ 57 " ldi r9, 0x1 \n"
50 "nop;" \ 58 " and %0, %0, r9 \n"
51 "ori\tr8,0x1;" \ 59 " or r8, r8, %0 \n"
52 "mtcr\tr8,cr0;" \ 60 " mtcr r8, cr0 \n"
53 "nop;" \ 61 " nop \n"
54 "nop;" \ 62 " nop \n"
55 "nop;" \ 63 " nop \n"
56 "nop;" \ 64 " nop \n"
57 "nop;" \ 65 " nop \n"
58 : \ 66 :
59 : \ 67 : "r"(flags)
60 : "r8"); \ 68 : "r8", "r9", "memory");
61} 69}
62 70
63#define raw_local_irq_disable(void) \ 71static inline void arch_local_irq_enable(void)
64{ \ 72{
65 __asm__ __volatile__( \ 73 asm volatile(
66 "mfcr\tr8,cr0;" \ 74 " mfcr r8,cr0 \n"
67 "nop;" \ 75 " nop \n"
68 "nop;" \ 76 " nop \n"
69 "srli\tr8,r8,1;" \ 77 " ori r8,0x1 \n"
70 "slli\tr8,r8,1;" \ 78 " mtcr r8,cr0 \n"
71 "mtcr\tr8,cr0;" \ 79 " nop \n"
72 "nop;" \ 80 " nop \n"
73 "nop;" \ 81 " nop \n"
74 "nop;" \ 82 " nop \n"
75 "nop;" \ 83 " nop \n"
76 "nop;" \ 84 :
77 : \ 85 :
78 : \ 86 : "r8", "memory");
79 : "r8"); \
80} 87}
81 88
82#define raw_local_save_flags(x) \ 89static inline void arch_local_irq_disable(void)
83{ \ 90{
84 __asm__ __volatile__( \ 91 asm volatile(
85 "mfcr r8, cr0;" \ 92 " mfcr r8,cr0 \n"
86 "nop;" \ 93 " nop \n"
87 "nop;" \ 94 " nop \n"
88 "mv %0, r8;" \ 95 " srli r8,r8,1 \n"
89 "nop;" \ 96 " slli r8,r8,1 \n"
90 "nop;" \ 97 " mtcr r8,cr0 \n"
91 "nop;" \ 98 " nop \n"
92 "nop;" \ 99 " nop \n"
93 "nop;" \ 100 " nop \n"
94 "ldi r9, 0x1;" \ 101 " nop \n"
95 "and %0, %0, r9;" \ 102 " nop \n"
96 : "=r" (x) \ 103 :
97 : \ 104 :
98 : "r8", "r9" \ 105 : "r8", "memory");
99 ); \
100} 106}
101 107
102static inline int raw_irqs_disabled_flags(unsigned long flags) 108static inline bool arch_irqs_disabled_flags(unsigned long flags)
103{ 109{
104 return !(flags & 1); 110 return !(flags & 1);
105} 111}
106 112
107#endif 113static inline bool arch_irqs_disabled(void)
114{
115 return arch_irqs_disabled_flags(arch_local_save_flags());
116}
117
118#endif /* __ASSEMBLY__ */
108 119
109#endif /* _ASM_SCORE_IRQFLAGS_H */ 120#endif /* _ASM_SCORE_IRQFLAGS_H */
diff --git a/arch/sh/include/asm/irqflags.h b/arch/sh/include/asm/irqflags.h
index a741153b41c2..43b7608606c3 100644
--- a/arch/sh/include/asm/irqflags.h
+++ b/arch/sh/include/asm/irqflags.h
@@ -1,8 +1,8 @@
1#ifndef __ASM_SH_IRQFLAGS_H 1#ifndef __ASM_SH_IRQFLAGS_H
2#define __ASM_SH_IRQFLAGS_H 2#define __ASM_SH_IRQFLAGS_H
3 3
4#define RAW_IRQ_DISABLED 0xf0 4#define ARCH_IRQ_DISABLED 0xf0
5#define RAW_IRQ_ENABLED 0x00 5#define ARCH_IRQ_ENABLED 0x00
6 6
7#include <asm-generic/irqflags.h> 7#include <asm-generic/irqflags.h>
8 8
diff --git a/arch/sh/kernel/irq_32.c b/arch/sh/kernel/irq_32.c
index e33ab15831f9..e5a755be9129 100644
--- a/arch/sh/kernel/irq_32.c
+++ b/arch/sh/kernel/irq_32.c
@@ -10,11 +10,11 @@
10#include <linux/irqflags.h> 10#include <linux/irqflags.h>
11#include <linux/module.h> 11#include <linux/module.h>
12 12
13void notrace raw_local_irq_restore(unsigned long flags) 13void notrace arch_local_irq_restore(unsigned long flags)
14{ 14{
15 unsigned long __dummy0, __dummy1; 15 unsigned long __dummy0, __dummy1;
16 16
17 if (flags == RAW_IRQ_DISABLED) { 17 if (flags == ARCH_IRQ_DISABLED) {
18 __asm__ __volatile__ ( 18 __asm__ __volatile__ (
19 "stc sr, %0\n\t" 19 "stc sr, %0\n\t"
20 "or #0xf0, %0\n\t" 20 "or #0xf0, %0\n\t"
@@ -33,14 +33,14 @@ void notrace raw_local_irq_restore(unsigned long flags)
33#endif 33#endif
34 "ldc %0, sr\n\t" 34 "ldc %0, sr\n\t"
35 : "=&r" (__dummy0), "=r" (__dummy1) 35 : "=&r" (__dummy0), "=r" (__dummy1)
36 : "1" (~RAW_IRQ_DISABLED) 36 : "1" (~ARCH_IRQ_DISABLED)
37 : "memory" 37 : "memory"
38 ); 38 );
39 } 39 }
40} 40}
41EXPORT_SYMBOL(raw_local_irq_restore); 41EXPORT_SYMBOL(arch_local_irq_restore);
42 42
43unsigned long notrace __raw_local_save_flags(void) 43unsigned long notrace arch_local_save_flags(void)
44{ 44{
45 unsigned long flags; 45 unsigned long flags;
46 46
@@ -54,4 +54,4 @@ unsigned long notrace __raw_local_save_flags(void)
54 54
55 return flags; 55 return flags;
56} 56}
57EXPORT_SYMBOL(__raw_local_save_flags); 57EXPORT_SYMBOL(arch_local_save_flags);
diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h
index 0fca9d97d44f..d4d0711de0f9 100644
--- a/arch/sparc/include/asm/irqflags_32.h
+++ b/arch/sparc/include/asm/irqflags_32.h
@@ -5,33 +5,40 @@
5 * 5 *
6 * This file gets included from lowlevel asm headers too, to provide 6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the 7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers. 8 * arch_local_irq_*() functions from the lowlevel headers.
9 */ 9 */
10#ifndef _ASM_IRQFLAGS_H 10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H 11#define _ASM_IRQFLAGS_H
12 12
13#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
14 14
15extern void raw_local_irq_restore(unsigned long); 15#include <linux/types.h>
16extern unsigned long __raw_local_irq_save(void);
17extern void raw_local_irq_enable(void);
18 16
19static inline unsigned long getipl(void) 17extern void arch_local_irq_restore(unsigned long);
18extern unsigned long arch_local_irq_save(void);
19extern void arch_local_irq_enable(void);
20
21static inline unsigned long arch_local_save_flags(void)
20{ 22{
21 unsigned long retval; 23 unsigned long flags;
24
25 asm volatile("rd %%psr, %0" : "=r" (flags));
26 return flags;
27}
22 28
23 __asm__ __volatile__("rd %%psr, %0" : "=r" (retval)); 29static inline void arch_local_irq_disable(void)
24 return retval; 30{
31 arch_local_irq_save();
25} 32}
26 33
27#define raw_local_save_flags(flags) ((flags) = getipl()) 34static inline bool arch_irqs_disabled_flags(unsigned long flags)
28#define raw_local_irq_save(flags) ((flags) = __raw_local_irq_save()) 35{
29#define raw_local_irq_disable() ((void) __raw_local_irq_save()) 36 return (flags & PSR_PIL) != 0;
30#define raw_irqs_disabled() ((getipl() & PSR_PIL) != 0) 37}
31 38
32static inline int raw_irqs_disabled_flags(unsigned long flags) 39static inline bool arch_irqs_disabled(void)
33{ 40{
34 return ((flags & PSR_PIL) != 0); 41 return arch_irqs_disabled_flags(arch_local_save_flags());
35} 42}
36 43
37#endif /* (__ASSEMBLY__) */ 44#endif /* (__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index bfa1ea45b4cd..aab969c82c2b 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * This file gets included from lowlevel asm headers too, to provide 6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the 7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers. 8 * arch_local_irq_*() functions from the lowlevel headers.
9 */ 9 */
10#ifndef _ASM_IRQFLAGS_H 10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H 11#define _ASM_IRQFLAGS_H
@@ -14,7 +14,7 @@
14 14
15#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
16 16
17static inline unsigned long __raw_local_save_flags(void) 17static inline unsigned long arch_local_save_flags(void)
18{ 18{
19 unsigned long flags; 19 unsigned long flags;
20 20
@@ -26,10 +26,7 @@ static inline unsigned long __raw_local_save_flags(void)
26 return flags; 26 return flags;
27} 27}
28 28
29#define raw_local_save_flags(flags) \ 29static inline void arch_local_irq_restore(unsigned long flags)
30 do { (flags) = __raw_local_save_flags(); } while (0)
31
32static inline void raw_local_irq_restore(unsigned long flags)
33{ 30{
34 __asm__ __volatile__( 31 __asm__ __volatile__(
35 "wrpr %0, %%pil" 32 "wrpr %0, %%pil"
@@ -39,7 +36,7 @@ static inline void raw_local_irq_restore(unsigned long flags)
39 ); 36 );
40} 37}
41 38
42static inline void raw_local_irq_disable(void) 39static inline void arch_local_irq_disable(void)
43{ 40{
44 __asm__ __volatile__( 41 __asm__ __volatile__(
45 "wrpr %0, %%pil" 42 "wrpr %0, %%pil"
@@ -49,7 +46,7 @@ static inline void raw_local_irq_disable(void)
49 ); 46 );
50} 47}
51 48
52static inline void raw_local_irq_enable(void) 49static inline void arch_local_irq_enable(void)
53{ 50{
54 __asm__ __volatile__( 51 __asm__ __volatile__(
55 "wrpr 0, %%pil" 52 "wrpr 0, %%pil"
@@ -59,22 +56,17 @@ static inline void raw_local_irq_enable(void)
59 ); 56 );
60} 57}
61 58
62static inline int raw_irqs_disabled_flags(unsigned long flags) 59static inline int arch_irqs_disabled_flags(unsigned long flags)
63{ 60{
64 return (flags > 0); 61 return (flags > 0);
65} 62}
66 63
67static inline int raw_irqs_disabled(void) 64static inline int arch_irqs_disabled(void)
68{ 65{
69 unsigned long flags = __raw_local_save_flags(); 66 return arch_irqs_disabled_flags(arch_local_save_flags());
70
71 return raw_irqs_disabled_flags(flags);
72} 67}
73 68
74/* 69static inline unsigned long arch_local_irq_save(void)
75 * For spinlocks, etc:
76 */
77static inline unsigned long __raw_local_irq_save(void)
78{ 70{
79 unsigned long flags, tmp; 71 unsigned long flags, tmp;
80 72
@@ -100,9 +92,6 @@ static inline unsigned long __raw_local_irq_save(void)
100 return flags; 92 return flags;
101} 93}
102 94
103#define raw_local_irq_save(flags) \
104 do { (flags) = __raw_local_irq_save(); } while (0)
105
106#endif /* (__ASSEMBLY__) */ 95#endif /* (__ASSEMBLY__) */
107 96
108#endif /* !(_ASM_IRQFLAGS_H) */ 97#endif /* !(_ASM_IRQFLAGS_H) */
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index e1af43728329..0116d8d10def 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -57,7 +57,7 @@
57#define SMP_NOP2 57#define SMP_NOP2
58#define SMP_NOP3 58#define SMP_NOP3
59#endif /* SMP */ 59#endif /* SMP */
60unsigned long __raw_local_irq_save(void) 60unsigned long arch_local_irq_save(void)
61{ 61{
62 unsigned long retval; 62 unsigned long retval;
63 unsigned long tmp; 63 unsigned long tmp;
@@ -74,8 +74,9 @@ unsigned long __raw_local_irq_save(void)
74 74
75 return retval; 75 return retval;
76} 76}
77EXPORT_SYMBOL(arch_local_irq_save);
77 78
78void raw_local_irq_enable(void) 79void arch_local_irq_enable(void)
79{ 80{
80 unsigned long tmp; 81 unsigned long tmp;
81 82
@@ -89,8 +90,9 @@ void raw_local_irq_enable(void)
89 : "i" (PSR_PIL) 90 : "i" (PSR_PIL)
90 : "memory"); 91 : "memory");
91} 92}
93EXPORT_SYMBOL(arch_local_irq_enable);
92 94
93void raw_local_irq_restore(unsigned long old_psr) 95void arch_local_irq_restore(unsigned long old_psr)
94{ 96{
95 unsigned long tmp; 97 unsigned long tmp;
96 98
@@ -105,10 +107,7 @@ void raw_local_irq_restore(unsigned long old_psr)
105 : "i" (PSR_PIL), "r" (old_psr) 107 : "i" (PSR_PIL), "r" (old_psr)
106 : "memory"); 108 : "memory");
107} 109}
108 110EXPORT_SYMBOL(arch_local_irq_restore);
109EXPORT_SYMBOL(__raw_local_irq_save);
110EXPORT_SYMBOL(raw_local_irq_enable);
111EXPORT_SYMBOL(raw_local_irq_restore);
112 111
113/* 112/*
114 * Dave Redman (djhr@tadpole.co.uk) 113 * Dave Redman (djhr@tadpole.co.uk)
diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
index fa6e4e219b9c..d9850c2b9bf2 100644
--- a/arch/sparc/prom/p1275.c
+++ b/arch/sparc/prom/p1275.c
@@ -39,7 +39,7 @@ void p1275_cmd_direct(unsigned long *args)
39 unsigned long flags; 39 unsigned long flags;
40 40
41 raw_local_save_flags(flags); 41 raw_local_save_flags(flags);
42 raw_local_irq_restore(PIL_NMI); 42 raw_local_irq_restore((unsigned long)PIL_NMI);
43 raw_spin_lock(&prom_entry_lock); 43 raw_spin_lock(&prom_entry_lock);
44 44
45 prom_world(1); 45 prom_world(1);
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 45cf67c2f286..a11d4837ee4d 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -103,55 +103,57 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
103#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) 103#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
104 104
105/* Disable interrupts. */ 105/* Disable interrupts. */
106#define raw_local_irq_disable() \ 106#define arch_local_irq_disable() \
107 interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS) 107 interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
108 108
109/* Disable all interrupts, including NMIs. */ 109/* Disable all interrupts, including NMIs. */
110#define raw_local_irq_disable_all() \ 110#define arch_local_irq_disable_all() \
111 interrupt_mask_set_mask(-1UL) 111 interrupt_mask_set_mask(-1UL)
112 112
113/* Re-enable all maskable interrupts. */ 113/* Re-enable all maskable interrupts. */
114#define raw_local_irq_enable() \ 114#define arch_local_irq_enable() \
115 interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask)) 115 interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))
116 116
117/* Disable or enable interrupts based on flag argument. */ 117/* Disable or enable interrupts based on flag argument. */
118#define raw_local_irq_restore(disabled) do { \ 118#define arch_local_irq_restore(disabled) do { \
119 if (disabled) \ 119 if (disabled) \
120 raw_local_irq_disable(); \ 120 arch_local_irq_disable(); \
121 else \ 121 else \
122 raw_local_irq_enable(); \ 122 arch_local_irq_enable(); \
123} while (0) 123} while (0)
124 124
125/* Return true if "flags" argument means interrupts are disabled. */ 125/* Return true if "flags" argument means interrupts are disabled. */
126#define raw_irqs_disabled_flags(flags) ((flags) != 0) 126#define arch_irqs_disabled_flags(flags) ((flags) != 0)
127 127
128/* Return true if interrupts are currently disabled. */ 128/* Return true if interrupts are currently disabled. */
129#define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) 129#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
130 130
131/* Save whether interrupts are currently disabled. */ 131/* Save whether interrupts are currently disabled. */
132#define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled()) 132#define arch_local_save_flags() arch_irqs_disabled()
133 133
134/* Save whether interrupts are currently disabled, then disable them. */ 134/* Save whether interrupts are currently disabled, then disable them. */
135#define raw_local_irq_save(flags) \ 135#define arch_local_irq_save() ({ \
136 do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0) 136 unsigned long __flags = arch_local_save_flags(); \
137 arch_local_irq_disable(); \
138 __flags; })
137 139
138/* Prevent the given interrupt from being enabled next time we enable irqs. */ 140/* Prevent the given interrupt from being enabled next time we enable irqs. */
139#define raw_local_irq_mask(interrupt) \ 141#define arch_local_irq_mask(interrupt) \
140 (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) 142 (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
141 143
142/* Prevent the given interrupt from being enabled immediately. */ 144/* Prevent the given interrupt from being enabled immediately. */
143#define raw_local_irq_mask_now(interrupt) do { \ 145#define arch_local_irq_mask_now(interrupt) do { \
144 raw_local_irq_mask(interrupt); \ 146 arch_local_irq_mask(interrupt); \
145 interrupt_mask_set(interrupt); \ 147 interrupt_mask_set(interrupt); \
146} while (0) 148} while (0)
147 149
148/* Allow the given interrupt to be enabled next time we enable irqs. */ 150/* Allow the given interrupt to be enabled next time we enable irqs. */
149#define raw_local_irq_unmask(interrupt) \ 151#define arch_local_irq_unmask(interrupt) \
150 (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) 152 (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
151 153
152/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ 154/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
153#define raw_local_irq_unmask_now(interrupt) do { \ 155#define arch_local_irq_unmask_now(interrupt) do { \
154 raw_local_irq_unmask(interrupt); \ 156 arch_local_irq_unmask(interrupt); \
155 if (!irqs_disabled()) \ 157 if (!irqs_disabled()) \
156 interrupt_mask_reset(interrupt); \ 158 interrupt_mask_reset(interrupt); \
157} while (0) 159} while (0)
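
Worth noting for the block above (illustration, not part of the patch): on tile the flags value is a plain disabled/enabled boolean produced by arch_irqs_disabled(), not a saved mask register, so the restore path is just a branch between disable and enable. A sketch (the function name is invented for the example):

	static void example_tile_save_restore(void)
	{
		unsigned long disabled = arch_local_irq_save();	/* old boolean state */

		/* ... critical section with maskable interrupts blocked ... */
		arch_local_irq_restore(disabled);	/* re-enables only if previously enabled */
	}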
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 9e2b952f810a..5745ce8bf108 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -61,22 +61,22 @@ static inline void native_halt(void)
61#else 61#else
62#ifndef __ASSEMBLY__ 62#ifndef __ASSEMBLY__
63 63
64static inline unsigned long __raw_local_save_flags(void) 64static inline unsigned long arch_local_save_flags(void)
65{ 65{
66 return native_save_fl(); 66 return native_save_fl();
67} 67}
68 68
69static inline void raw_local_irq_restore(unsigned long flags) 69static inline void arch_local_irq_restore(unsigned long flags)
70{ 70{
71 native_restore_fl(flags); 71 native_restore_fl(flags);
72} 72}
73 73
74static inline void raw_local_irq_disable(void) 74static inline void arch_local_irq_disable(void)
75{ 75{
76 native_irq_disable(); 76 native_irq_disable();
77} 77}
78 78
79static inline void raw_local_irq_enable(void) 79static inline void arch_local_irq_enable(void)
80{ 80{
81 native_irq_enable(); 81 native_irq_enable();
82} 82}
@@ -85,7 +85,7 @@ static inline void raw_local_irq_enable(void)
85 * Used in the idle loop; sti takes one instruction cycle 85 * Used in the idle loop; sti takes one instruction cycle
86 * to complete: 86 * to complete:
87 */ 87 */
88static inline void raw_safe_halt(void) 88static inline void arch_safe_halt(void)
89{ 89{
90 native_safe_halt(); 90 native_safe_halt();
91} 91}
@@ -102,12 +102,10 @@ static inline void halt(void)
102/* 102/*
103 * For spinlocks, etc: 103 * For spinlocks, etc:
104 */ 104 */
105static inline unsigned long __raw_local_irq_save(void) 105static inline unsigned long arch_local_irq_save(void)
106{ 106{
107 unsigned long flags = __raw_local_save_flags(); 107 unsigned long flags = arch_local_save_flags();
108 108 arch_local_irq_disable();
109 raw_local_irq_disable();
110
111 return flags; 109 return flags;
112} 110}
113#else 111#else
@@ -153,22 +151,16 @@ static inline unsigned long __raw_local_irq_save(void)
153#endif /* CONFIG_PARAVIRT */ 151#endif /* CONFIG_PARAVIRT */
154 152
155#ifndef __ASSEMBLY__ 153#ifndef __ASSEMBLY__
156#define raw_local_save_flags(flags) \ 154static inline int arch_irqs_disabled_flags(unsigned long flags)
157 do { (flags) = __raw_local_save_flags(); } while (0)
158
159#define raw_local_irq_save(flags) \
160 do { (flags) = __raw_local_irq_save(); } while (0)
161
162static inline int raw_irqs_disabled_flags(unsigned long flags)
163{ 155{
164 return !(flags & X86_EFLAGS_IF); 156 return !(flags & X86_EFLAGS_IF);
165} 157}
166 158
167static inline int raw_irqs_disabled(void) 159static inline int arch_irqs_disabled(void)
168{ 160{
169 unsigned long flags = __raw_local_save_flags(); 161 unsigned long flags = arch_local_save_flags();
170 162
171 return raw_irqs_disabled_flags(flags); 163 return arch_irqs_disabled_flags(flags);
172} 164}
173 165
174#else 166#else
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5653f43d90e5..499954c530da 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -105,7 +105,7 @@ static inline void write_cr8(unsigned long x)
105} 105}
106#endif 106#endif
107 107
108static inline void raw_safe_halt(void) 108static inline void arch_safe_halt(void)
109{ 109{
110 PVOP_VCALL0(pv_irq_ops.safe_halt); 110 PVOP_VCALL0(pv_irq_ops.safe_halt);
111} 111}
@@ -829,32 +829,32 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
829#define __PV_IS_CALLEE_SAVE(func) \ 829#define __PV_IS_CALLEE_SAVE(func) \
830 ((struct paravirt_callee_save) { func }) 830 ((struct paravirt_callee_save) { func })
831 831
832static inline unsigned long __raw_local_save_flags(void) 832static inline unsigned long arch_local_save_flags(void)
833{ 833{
834 return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); 834 return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
835} 835}
836 836
837static inline void raw_local_irq_restore(unsigned long f) 837static inline void arch_local_irq_restore(unsigned long f)
838{ 838{
839 PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); 839 PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
840} 840}
841 841
842static inline void raw_local_irq_disable(void) 842static inline void arch_local_irq_disable(void)
843{ 843{
844 PVOP_VCALLEE0(pv_irq_ops.irq_disable); 844 PVOP_VCALLEE0(pv_irq_ops.irq_disable);
845} 845}
846 846
847static inline void raw_local_irq_enable(void) 847static inline void arch_local_irq_enable(void)
848{ 848{
849 PVOP_VCALLEE0(pv_irq_ops.irq_enable); 849 PVOP_VCALLEE0(pv_irq_ops.irq_enable);
850} 850}
851 851
852static inline unsigned long __raw_local_irq_save(void) 852static inline unsigned long arch_local_irq_save(void)
853{ 853{
854 unsigned long f; 854 unsigned long f;
855 855
856 f = __raw_local_save_flags(); 856 f = arch_local_save_flags();
857 raw_local_irq_disable(); 857 arch_local_irq_disable();
858 return f; 858 return f;
859} 859}
860 860
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index e0500646585d..23e061b9327b 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -224,7 +224,7 @@ static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enab
224 goto out; 224 goto out;
225 } 225 }
226 226
227 flags = __raw_local_save_flags(); 227 flags = arch_local_save_flags();
228 if (irq_enable) { 228 if (irq_enable) {
229 ADD_STATS(taken_slow_irqenable, 1); 229 ADD_STATS(taken_slow_irqenable, 1);
230 raw_local_irq_enable(); 230 raw_local_irq_enable();
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
new file mode 100644
index 000000000000..dae9a8bdcb17
--- /dev/null
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -0,0 +1,58 @@
1/*
2 * Xtensa IRQ flags handling functions
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 */
10
11#ifndef _XTENSA_IRQFLAGS_H
12#define _XTENSA_IRQFLAGS_H
13
14#include <linux/types.h>
15
16static inline unsigned long arch_local_save_flags(void)
17{
18 unsigned long flags;
19 asm volatile("rsr %0,"__stringify(PS) : "=a" (flags));
20 return flags;
21}
22
23static inline unsigned long arch_local_irq_save(void)
24{
25 unsigned long flags;
26 asm volatile("rsil %0, "__stringify(LOCKLEVEL)
27 : "=a" (flags) :: "memory");
28 return flags;
29}
30
31static inline void arch_local_irq_disable(void)
32{
33 arch_local_irq_save();
34}
35
36static inline void arch_local_irq_enable(void)
37{
38 unsigned long flags;
39 asm volatile("rsil %0, 0" : "=a" (flags) :: "memory");
40}
41
42static inline void arch_local_irq_restore(unsigned long flags)
43{
44 asm volatile("wsr %0, "__stringify(PS)" ; rsync"
45 :: "a" (flags) : "memory");
46}
47
48static inline bool arch_irqs_disabled_flags(unsigned long flags)
49{
50 return (flags & 0xf) != 0;
51}
52
53static inline bool arch_irqs_disabled(void)
54{
55 return arch_irqs_disabled_flags(arch_local_save_flags());
56}
57
58#endif /* _XTENSA_IRQFLAGS_H */
diff --git a/arch/xtensa/include/asm/system.h b/arch/xtensa/include/asm/system.h
index 62b1e8f3c13c..1e7e09ab6cd7 100644
--- a/arch/xtensa/include/asm/system.h
+++ b/arch/xtensa/include/asm/system.h
@@ -12,41 +12,10 @@
12#define _XTENSA_SYSTEM_H 12#define _XTENSA_SYSTEM_H
13 13
14#include <linux/stringify.h> 14#include <linux/stringify.h>
15#include <linux/irqflags.h>
15 16
16#include <asm/processor.h> 17#include <asm/processor.h>
17 18
18/* interrupt control */
19
20#define local_save_flags(x) \
21 __asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x));
22#define local_irq_restore(x) do { \
23 __asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync" \
24 :: "a" (x) : "memory"); } while(0);
25#define local_irq_save(x) do { \
26 __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) \
27 : "=a" (x) :: "memory");} while(0);
28
29static inline void local_irq_disable(void)
30{
31 unsigned long flags;
32 __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL)
33 : "=a" (flags) :: "memory");
34}
35static inline void local_irq_enable(void)
36{
37 unsigned long flags;
38 __asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory");
39
40}
41
42static inline int irqs_disabled(void)
43{
44 unsigned long flags;
45 local_save_flags(flags);
46 return flags & 0xf;
47}
48
49
50#define smp_read_barrier_depends() do { } while(0) 19#define smp_read_barrier_depends() do { } while(0)
51#define read_barrier_depends() do { } while(0) 20#define read_barrier_depends() do { } while(0)
52 21
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index f6d72e1f2a38..5707a80b96b6 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -468,7 +468,7 @@ sclp_sync_wait(void)
468 cr0_sync &= 0xffff00a0; 468 cr0_sync &= 0xffff00a0;
469 cr0_sync |= 0x00000200; 469 cr0_sync |= 0x00000200;
470 __ctl_load(cr0_sync, 0, 0); 470 __ctl_load(cr0_sync, 0, 0);
471 __raw_local_irq_stosm(0x01); 471 __arch_local_irq_stosm(0x01);
472 /* Loop until driver state indicates finished request */ 472 /* Loop until driver state indicates finished request */
473 while (sclp_running_state != sclp_running_state_idle) { 473 while (sclp_running_state != sclp_running_state_idle) {
474 /* Check for expired request timer */ 474 /* Check for expired request timer */
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index e53347fbf1da..fd57b8477fab 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -43,6 +43,7 @@
43 */ 43 */
44#define atomic_set(v, i) (((v)->counter) = (i)) 44#define atomic_set(v, i) (((v)->counter) = (i))
45 45
46#include <linux/irqflags.h>
46#include <asm/system.h> 47#include <asm/system.h>
47 48
48/** 49/**
@@ -57,7 +58,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
57 unsigned long flags; 58 unsigned long flags;
58 int temp; 59 int temp;
59 60
60 raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ 61 raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
61 temp = v->counter; 62 temp = v->counter;
62 temp += i; 63 temp += i;
63 v->counter = temp; 64 v->counter = temp;
@@ -78,7 +79,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
78 unsigned long flags; 79 unsigned long flags;
79 int temp; 80 int temp;
80 81
81 raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ 82 raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
82 temp = v->counter; 83 temp = v->counter;
83 temp -= i; 84 temp -= i;
84 v->counter = temp; 85 v->counter = temp;
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index b2ba2fc8829a..2533fddd34a6 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -2,6 +2,7 @@
2#define __ASM_GENERIC_CMPXCHG_LOCAL_H 2#define __ASM_GENERIC_CMPXCHG_LOCAL_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/irqflags.h>
5 6
6extern unsigned long wrong_size_cmpxchg(volatile void *ptr); 7extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
7 8
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index 62f59080e5cc..c0771aa248cf 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/cache.h> 4#include <linux/cache.h>
5#include <linux/threads.h> 5#include <linux/threads.h>
6#include <linux/irq.h>
7 6
8typedef struct { 7typedef struct {
9 unsigned int __softirq_pending; 8 unsigned int __softirq_pending;
diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h
index 9aebf618275a..1f40d0024cf3 100644
--- a/include/asm-generic/irqflags.h
+++ b/include/asm-generic/irqflags.h
@@ -5,68 +5,62 @@
  * All architectures should implement at least the first two functions,
  * usually inline assembly will be the best way.
  */
-#ifndef RAW_IRQ_DISABLED
-#define RAW_IRQ_DISABLED 0
-#define RAW_IRQ_ENABLED 1
+#ifndef ARCH_IRQ_DISABLED
+#define ARCH_IRQ_DISABLED 0
+#define ARCH_IRQ_ENABLED 1
 #endif
 
 /* read interrupt enabled status */
-#ifndef __raw_local_save_flags
-unsigned long __raw_local_save_flags(void);
+#ifndef arch_local_save_flags
+unsigned long arch_local_save_flags(void);
 #endif
 
 /* set interrupt enabled status */
-#ifndef raw_local_irq_restore
-void raw_local_irq_restore(unsigned long flags);
+#ifndef arch_local_irq_restore
+void arch_local_irq_restore(unsigned long flags);
 #endif
 
 /* get status and disable interrupts */
-#ifndef __raw_local_irq_save
-static inline unsigned long __raw_local_irq_save(void)
+#ifndef arch_local_irq_save
+static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
-	flags = __raw_local_save_flags();
-	raw_local_irq_restore(RAW_IRQ_DISABLED);
+	flags = arch_local_save_flags();
+	arch_local_irq_restore(ARCH_IRQ_DISABLED);
 	return flags;
 }
 #endif
 
 /* test flags */
-#ifndef raw_irqs_disabled_flags
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+#ifndef arch_irqs_disabled_flags
+static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return flags == RAW_IRQ_DISABLED;
+	return flags == ARCH_IRQ_DISABLED;
 }
 #endif
 
 /* unconditionally enable interrupts */
-#ifndef raw_local_irq_enable
-static inline void raw_local_irq_enable(void)
+#ifndef arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
 {
-	raw_local_irq_restore(RAW_IRQ_ENABLED);
+	arch_local_irq_restore(ARCH_IRQ_ENABLED);
 }
 #endif
 
 /* unconditionally disable interrupts */
-#ifndef raw_local_irq_disable
-static inline void raw_local_irq_disable(void)
+#ifndef arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
 {
-	raw_local_irq_restore(RAW_IRQ_DISABLED);
+	arch_local_irq_restore(ARCH_IRQ_DISABLED);
 }
 #endif
 
 /* test hardware interrupt enable bit */
-#ifndef raw_irqs_disabled
-static inline int raw_irqs_disabled(void)
+#ifndef arch_irqs_disabled
+static inline int arch_irqs_disabled(void)
 {
-	return raw_irqs_disabled_flags(__raw_local_save_flags());
+	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 #endif
 
-#define raw_local_save_flags(flags) \
-	do { (flags) = __raw_local_save_flags(); } while (0)
-
-#define raw_local_irq_save(flags) \
-	do { (flags) = __raw_local_irq_save(); } while (0)
-
 #endif /* __ASM_GENERIC_IRQFLAGS_H */
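
As the header comment says, an architecture only has to supply the first two operations and the generic header derives the rest. Purely as a hedged illustration (not part of this patch), a minimal port might look like the sketch below; my_read_psr(), my_write_psr() and MY_PSR_IE are hypothetical stand-ins for the arch's real status-register accessors, which would normally be inline assembly:

/* hypothetical <asm/irqflags.h> built on top of asm-generic/irqflags.h */
#ifndef __MYARCH_IRQFLAGS_H
#define __MYARCH_IRQFLAGS_H

#define ARCH_IRQ_DISABLED	0
#define ARCH_IRQ_ENABLED	1

static inline unsigned long arch_local_save_flags(void)
{
	/* report only the interrupt-enable state, not the whole PSR */
	return (my_read_psr() & MY_PSR_IE) ? ARCH_IRQ_ENABLED : ARCH_IRQ_DISABLED;
}
#define arch_local_save_flags arch_local_save_flags

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (flags == ARCH_IRQ_ENABLED)
		my_write_psr(my_read_psr() | MY_PSR_IE);
	else
		my_write_psr(my_read_psr() & ~MY_PSR_IE);
}
#define arch_local_irq_restore arch_local_irq_restore

#include <asm-generic/irqflags.h>

#endif /* __MYARCH_IRQFLAGS_H */

The two #define aliases make the #ifndef guards in the generic header skip their fallback declarations, so arch_local_irq_save(), arch_local_irq_enable()/disable() and the arch_irqs_disabled*() tests all come from asm-generic/irqflags.h for free.
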
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 006bf45eae30..d176d658fe25 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -12,6 +12,7 @@
 #define _LINUX_TRACE_IRQFLAGS_H
 
 #include <linux/typecheck.h>
+#include <asm/irqflags.h>
 
 #ifdef CONFIG_TRACE_IRQFLAGS
   extern void trace_softirqs_on(unsigned long ip);
@@ -52,17 +53,45 @@
 # define start_critical_timings() do { } while (0)
 #endif
 
-#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-
-#include <asm/irqflags.h>
+/*
+ * Wrap the arch provided IRQ routines to provide appropriate checks.
+ */
+#define raw_local_irq_disable()		arch_local_irq_disable()
+#define raw_local_irq_enable()		arch_local_irq_enable()
+#define raw_local_irq_save(flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = arch_local_irq_save();		\
+	} while (0)
+#define raw_local_irq_restore(flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		arch_local_irq_restore(flags);		\
+	} while (0)
+#define raw_local_save_flags(flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = arch_local_save_flags();	\
+	} while (0)
+#define raw_irqs_disabled_flags(flags)			\
+	({						\
+		typecheck(unsigned long, flags);	\
+		arch_irqs_disabled_flags(flags);	\
+	})
+#define raw_irqs_disabled()		(arch_irqs_disabled())
+#define raw_safe_halt()			arch_safe_halt()
 
+/*
+ * The local_irq_*() APIs are equal to the raw_local_irq*()
+ * if !TRACE_IRQFLAGS.
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 #define local_irq_enable() \
 	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 #define local_irq_disable() \
 	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
 #define local_irq_save(flags) \
 	do { \
-		typecheck(unsigned long, flags); \
 		raw_local_irq_save(flags); \
 		trace_hardirqs_off(); \
 	} while (0)
@@ -70,7 +99,6 @@
 
 #define local_irq_restore(flags) \
 	do { \
-		typecheck(unsigned long, flags); \
 		if (raw_irqs_disabled_flags(flags)) { \
 			raw_local_irq_restore(flags); \
 			trace_hardirqs_off(); \
@@ -79,51 +107,44 @@
 			raw_local_irq_restore(flags); \
 		} \
 	} while (0)
-#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
-/*
- * The local_irq_*() APIs are equal to the raw_local_irq*()
- * if !TRACE_IRQFLAGS.
- */
-# define raw_local_irq_disable()	local_irq_disable()
-# define raw_local_irq_enable()	local_irq_enable()
-# define raw_local_irq_save(flags) \
-	do { \
-		typecheck(unsigned long, flags); \
-		local_irq_save(flags); \
-	} while (0)
-# define raw_local_irq_restore(flags) \
+#define local_save_flags(flags) \
 	do { \
-		typecheck(unsigned long, flags); \
-		local_irq_restore(flags); \
+		raw_local_save_flags(flags); \
 	} while (0)
-#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
 
-#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-#define safe_halt() \
-	do { \
-		trace_hardirqs_on(); \
-		raw_safe_halt(); \
-	} while (0)
+#define irqs_disabled_flags(flags) \
+	({ \
+		raw_irqs_disabled_flags(flags); \
+	})
 
-#define local_save_flags(flags) \
-	do { \
-		typecheck(unsigned long, flags); \
-		raw_local_save_flags(flags); \
+#define irqs_disabled() \
+	({ \
+		unsigned long _flags; \
+		raw_local_save_flags(_flags); \
+		raw_irqs_disabled_flags(_flags); \
+	})
+
+#define safe_halt() \
+	do { \
+		trace_hardirqs_on(); \
+		raw_safe_halt(); \
 	} while (0)
 
-#define irqs_disabled() \
-({ \
-	unsigned long _flags; \
-	\
-	raw_local_save_flags(_flags); \
-	raw_irqs_disabled_flags(_flags); \
-})
 
-#define irqs_disabled_flags(flags) \
-({ \
-	typecheck(unsigned long, flags); \
-	raw_irqs_disabled_flags(flags); \
-})
+#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
+#define local_irq_enable()	do { raw_local_irq_enable(); } while (0)
+#define local_irq_disable()	do { raw_local_irq_disable(); } while (0)
+#define local_irq_save(flags)					\
+	do {							\
+		raw_local_irq_save(flags);			\
+	} while (0)
+#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
+#define local_save_flags(flags)	do { raw_local_save_flags(flags); } while (0)
+#define irqs_disabled()		(raw_irqs_disabled())
+#define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags))
+#define safe_halt()		do { raw_safe_halt(); } while (0)
+
 #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
 
 #endif
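
For callers nothing changes, but the typecheck() now sits in the raw_local_irq_*() wrappers rather than in the tracing variants, so a wrongly typed flags variable is caught whether or not CONFIG_TRACE_IRQFLAGS_SUPPORT is enabled. A hedged usage sketch (my_hw_kick() and its critical section are invented for illustration):

static void my_hw_kick(void)
{
	unsigned long flags;	/* must be unsigned long: typecheck() warns on anything else */

	local_irq_save(flags);		/* disable local IRQs; trace_hardirqs_off() fires when tracing is on */
	/* ... touch per-CPU or device state that must not race with an interrupt ... */
	local_irq_restore(flags);	/* re-enable only if IRQs were enabled on entry */
}
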
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index f8854655860e..80e535897de6 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -50,6 +50,7 @@
 #include <linux/preempt.h>
 #include <linux/linkage.h>
 #include <linux/compiler.h>
+#include <linux/irqflags.h>
 #include <linux/thread_info.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>