author      Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:20:03 -0400
committer   Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:20:03 -0400
commit      96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42 (patch)
tree        d947a467aa2da3140279617bc4b9b101640d7bf4 /include/asm-x86/irqflags_32.h
parent      27bd0c955648646abf2a353a8371d28c37bcd982 (diff)
i386/x86_64: move headers to include/asm-x86
Move the headers to include/asm-x86 and fixup the
header install make rules
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/irqflags_32.h')
-rw-r--r--    include/asm-x86/irqflags_32.h    163
1 file changed, 163 insertions, 0 deletions
diff --git a/include/asm-x86/irqflags_32.h b/include/asm-x86/irqflags_32.h
new file mode 100644
index 000000000000..eff8585cb741
--- /dev/null
+++ b/include/asm-x86/irqflags_32.h
@@ -0,0 +1,163 @@
/*
 * include/asm-i386/irqflags.h
 *
 * IRQ flags handling
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() functions from the lowlevel headers.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
static inline unsigned long native_save_fl(void)
{
        unsigned long f;
        asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
        return f;
}

static inline void native_restore_fl(unsigned long f)
{
        asm volatile("pushl %0 ; popfl": /* no output */
                     :"g" (f)
                     :"memory", "cc");
}

static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}

static inline void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__

static inline unsigned long __raw_local_save_flags(void)
{
        return native_save_fl();
}

static inline void raw_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline void raw_local_irq_disable(void)
{
        native_irq_disable();
}

static inline void raw_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline void raw_safe_halt(void)
{
        native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long flags = __raw_local_save_flags();

        raw_local_irq_disable();

        return flags;
}

#else
#define DISABLE_INTERRUPTS(clobbers)    cli
#define ENABLE_INTERRUPTS(clobbers)     sti
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define INTERRUPT_RETURN                iret
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#define raw_local_save_flags(flags) \
        do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags) \
        do { (flags) = __raw_local_irq_save(); } while (0)

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int raw_irqs_disabled(void)
{
        unsigned long flags = __raw_local_save_flags();

        return raw_irqs_disabled_flags(flags);
}
#endif /* __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code. We call a
 * C function, so save all the C-clobbered registers:
 */
#ifdef CONFIG_TRACE_IRQFLAGS

# define TRACE_IRQS_ON          \
        pushl %eax;             \
        pushl %ecx;             \
        pushl %edx;             \
        call trace_hardirqs_on; \
        popl %edx;              \
        popl %ecx;              \
        popl %eax;

# define TRACE_IRQS_OFF          \
        pushl %eax;              \
        pushl %ecx;              \
        pushl %edx;              \
        call trace_hardirqs_off; \
        popl %edx;               \
        popl %ecx;               \
        popl %eax;

#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

#endif
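
For context, and not part of the commit itself: a minimal sketch of how a caller might use the raw primitives defined above in the non-paravirt case. The helper name and its body are hypothetical; in-tree code would normally go through the generic local_irq_save()/local_irq_restore() wrappers that are layered on top of these raw_* functions.

        /*
         * Illustrative sketch only (hypothetical helper, not kernel code):
         * bracket a critical section with the raw primitives from this header.
         */
        static void example_critical_section(void)
        {
                unsigned long flags;

                raw_local_irq_save(flags);      /* pushfl/popl saves EFLAGS, then cli */

                /* ... work that must not be interrupted on this CPU ... */

                raw_local_irq_restore(flags);   /* pushl/popfl restores the saved IF bit */
        }

Testing raw_irqs_disabled_flags(flags) on the saved value reports whether interrupts were already disabled before the save, since it checks the X86_EFLAGS_IF bit of the saved EFLAGS word.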