author     Ingo Molnar <mingo@elte.hu>              2006-07-03 03:24:45 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-07-03 18:27:03 -0400
commit     2601e64d262ee5ed4d4a5737345803800d9c4db3
tree       67f852752af3c54f745b93f7016cdd7c5ffb1a14 /include/asm-x86_64
parent     c8558fcdecb1f920df8050be4f2d5f499060030e
[PATCH] lockdep: irqtrace subsystem, x86_64 support
Add irqflags-tracing support to x86_64.
[akpm@osdl.org: build fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
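
For context: the arch now exposes raw_* primitives because the generic <linux/irqflags.h> layer (added by the core irqtrace patch in this series) wraps them with lockdep's trace_hardirqs_on()/trace_hardirqs_off() hooks when CONFIG_TRACE_IRQFLAGS is enabled. A simplified sketch of that wrapping (approximate macro bodies, not part of this diff):

	#ifdef CONFIG_TRACE_IRQFLAGS
	/* Tracing enabled: notify lockdep on every hardirq state transition. */
	#define local_irq_disable() \
		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
	#define local_irq_enable() \
		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
	#define local_irq_save(flags) \
		do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
	#else
	/* Tracing disabled: the wrappers collapse to the raw arch macros. */
	#define local_irq_disable()	raw_local_irq_disable()
	#define local_irq_enable()	raw_local_irq_enable()
	#define local_irq_save(flags)	raw_local_irq_save(flags)
	#endif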
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/irqflags.h  61
-rw-r--r--  include/asm-x86_64/system.h    38
2 files changed, 62 insertions, 37 deletions
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
new file mode 100644
index 000000000000..22f3c06b247e
--- /dev/null
+++ b/include/asm-x86_64/irqflags.h
@@ -0,0 +1,61 @@
+/*
+ * include/asm-x86_64/irqflags.h
+ *
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() macros from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+/* interrupt control.. */
+#define raw_local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
+#define raw_local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
+
+#ifdef CONFIG_X86_VSMP
+/* Interrupt control for VSMP architecture */
+#define raw_local_irq_disable()	do { unsigned long flags; raw_local_save_flags(flags); raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
+#define raw_local_irq_enable()	do { unsigned long flags; raw_local_save_flags(flags); raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
+
+#define raw_irqs_disabled_flags(flags)	\
+({	\
+	(flags & (1<<18)) || !(flags & (1<<9));	\
+})
+
+/* For spinlocks etc */
+#define raw_local_irq_save(x)	do { raw_local_save_flags(x); raw_local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
+#else /* CONFIG_X86_VSMP */
+#define raw_local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
+#define raw_local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
+
+#define raw_irqs_disabled_flags(flags)	\
+({	\
+	!(flags & (1<<9));	\
+})
+
+/* For spinlocks etc */
+#define raw_local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# raw_local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
+#endif
+
+#define raw_irqs_disabled()	\
+({	\
+	unsigned long flags;	\
+	raw_local_save_flags(flags);	\
+	raw_irqs_disabled_flags(flags);	\
+})
+
+/* used in the idle loop; sti takes one instruction cycle to complete */
+#define raw_safe_halt()	__asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt()	__asm__ __volatile__("hlt": : :"memory")
+
+#else /* __ASSEMBLY__: */
+# define TRACE_IRQS_ON
+# define TRACE_IRQS_OFF
+#endif
+
+#endif
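
The "For spinlocks etc" pair above is the flags-preserving form of interrupt disabling. A hypothetical caller (illustrative only; ordinary kernel code would use the traced local_irq_save()/local_irq_restore() wrappers rather than the raw_* macros directly) would look like this:

	/* Hypothetical example of the raw save/restore pattern. */
	static void example_critical_section(void)
	{
		unsigned long flags;	/* must be unsigned long; see warn_if_not_ulong() */

		raw_local_irq_save(flags);	/* pushfq ; popq ; cli on non-VSMP */

		/* ... work that must not race with local interrupts ... */

		raw_local_irq_restore(flags);	/* popfq: IF goes back to whatever it was */
	}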
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 68e559f3631c..f67f2873a922 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -244,43 +244,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
 
-/* interrupt control.. */
-#define local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
-#define local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
-
-#ifdef CONFIG_X86_VSMP
-/* Interrupt control for VSMP architecture */
-#define local_irq_disable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
-#define local_irq_enable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
-
-#define irqs_disabled()	\
-({	\
-	unsigned long flags;	\
-	local_save_flags(flags);	\
-	(flags & (1<<18)) || !(flags & (1<<9));	\
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x)	do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
-#else /* CONFIG_X86_VSMP */
-#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
-#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
-
-#define irqs_disabled()	\
-({	\
-	unsigned long flags;	\
-	local_save_flags(flags);	\
-	!(flags & (1<<9));	\
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
-#endif
-
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt()	__asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt()	__asm__ __volatile__("hlt": : :"memory")
+#include <linux/irqflags.h>
 
 void cpu_idle_wait(void);
 
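
With the open-coded macros removed from system.h, call sites do not change: the familiar local_irq_*() and irqs_disabled() names are now supplied by <linux/irqflags.h>, layered over the raw_* definitions from the new asm header. A hypothetical call site (illustrative names) still reads:

	#include <linux/irqflags.h>

	/* Hypothetical driver-style helper: unchanged by this patch. */
	static void example_device_poke(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* raw cli, plus trace_hardirqs_off() when tracing is on */
		/* ... touch hardware registers atomically w.r.t. local interrupts ... */
		local_irq_restore(flags);	/* trace_hardirqs_on() again if IF was set in flags */
	}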