author:    Ingo Molnar <mingo@elte.hu>               2006-07-03 03:24:45 -0400
committer: Linus Torvalds <torvalds@g5.osdl.org>     2006-07-03 18:27:03 -0400
commit:    6375e2b74c620794e1a27a26e4338aec2e41346a
tree:      f966478bc214af98e9ca6235506377b5dd872526  /include/asm-x86_64/irqflags.h
parent:    2601e64d262ee5ed4d4a5737345803800d9c4db3
[PATCH] lockdep: irqtrace cleanup of include/asm-x86_64/irqflags.h
Clean up the x86-64 irqflags.h file:
- macro => inline function transformation
- simplifications
- style fixes
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-x86_64/irqflags.h')
-rw-r--r--  include/asm-x86_64/irqflags.h  156
1 files changed, 118 insertions, 38 deletions
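
The core of the cleanup described in the commit message above is the pattern of turning a statement macro into a static inline function plus a thin macro wrapper that preserves the existing "pass an lvalue" call convention. Below is a minimal standalone sketch of that pattern, not kernel code: it compiles as ordinary userspace C, and read_flags(), old_local_save_flags() and new_local_save_flags() are hypothetical stand-ins for the real pushfq/popq asm and the raw_local_save_flags() macro.

#include <stdio.h>

/*
 * Stand-in for the real "pushfq ; popq %0" asm: any function that
 * returns the current flags value will do for illustration.
 */
static unsigned long read_flags(void)
{
	return 0x202;	/* hypothetical EFLAGS value with IF (bit 9) set */
}

/* Old style: a statement macro that writes through its argument. */
#define old_local_save_flags(x) \
	do { (x) = read_flags(); } while (0)

/* New style: the real work moves into an inline function... */
static inline unsigned long __local_save_flags(void)
{
	return read_flags();
}

/* ...and a thin wrapper keeps the existing lvalue-style call syntax. */
#define new_local_save_flags(flags) \
	do { (flags) = __local_save_flags(); } while (0)

int main(void)
{
	unsigned long a, b;

	old_local_save_flags(a);
	new_local_save_flags(b);

	printf("old=%#lx new=%#lx\n", a, b);
	return 0;
}

The wrapper macro is what lets existing callers keep writing raw_local_save_flags(flags) unchanged, while the body becomes a normal function that the compiler can check and that higher-level irqflags wrappers can build on.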
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
index 22f3c06b247e..cce6937e87c0 100644
--- a/include/asm-x86_64/irqflags.h
+++ b/include/asm-x86_64/irqflags.h
@@ -5,57 +5,137 @@
  *
  * This file gets included from lowlevel asm headers too, to provide
  * wrapped versions of the local_irq_*() APIs, based on the
- * raw_local_irq_*() macros from the lowlevel headers.
+ * raw_local_irq_*() functions from the lowlevel headers.
  */
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
 
 #ifndef __ASSEMBLY__
+/*
+ * Interrupt control:
+ */
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__(
+		"# __raw_save_flags\n\t"
+		"pushfq ; popq %q0"
+		: "=g" (flags)
+		: /* no input */
+		: "memory"
+	);
 
-/* interrupt control.. */
-#define raw_local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
-#define raw_local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
+	return flags;
+}
+
+#define raw_local_save_flags(flags) \
+		do { (flags) = __raw_local_save_flags(); } while (0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	__asm__ __volatile__(
+		"pushq %0 ; popfq"
+		: /* no output */
+		:"g" (flags)
+		:"memory", "cc"
+	);
+}
 
 #ifdef CONFIG_X86_VSMP
-/* Interrupt control for VSMP architecture */
-#define raw_local_irq_disable()	do { unsigned long flags; raw_local_save_flags(flags); raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
-#define raw_local_irq_enable()	do { unsigned long flags; raw_local_save_flags(flags); raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
-
-#define raw_irqs_disabled_flags(flags)	\
-({						\
-	(flags & (1<<18)) || !(flags & (1<<9));	\
-})
-
-/* For spinlocks etc */
-#define raw_local_irq_save(x)	do { raw_local_save_flags(x); raw_local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
-#else /* CONFIG_X86_VSMP */
-#define raw_local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
-#define raw_local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
-
-#define raw_irqs_disabled_flags(flags)	\
-({						\
-	!(flags & (1<<9));			\
-})
-
-/* For spinlocks etc */
-#define raw_local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# raw_local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
+
+/*
+ * Interrupt control for the VSMP architecture:
+ */
+
+static inline void raw_local_irq_disable(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (1<<9)) || (flags & (1 << 18));
+}
+
+#else /* CONFIG_X86_VSMP */
+
+static inline void raw_local_irq_disable(void)
+{
+	__asm__ __volatile__("cli" : : : "memory");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	__asm__ __volatile__("sti" : : : "memory");
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (1 << 9));
+}
+
 #endif
 
-#define raw_irqs_disabled()			\
-({						\
-	unsigned long flags;			\
-	raw_local_save_flags(flags);		\
-	raw_irqs_disabled_flags(flags);		\
-})
+/*
+ * For spinlocks, etc.:
+ */
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_disable();
+
+	return flags;
+}
 
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define raw_safe_halt()	__asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt()		__asm__ __volatile__("hlt": : :"memory")
+#define raw_local_irq_save(flags) \
+		do { (flags) = __raw_local_irq_save(); } while (0)
+
+static inline int raw_irqs_disabled(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	return raw_irqs_disabled_flags(flags);
+}
+
+/*
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+static inline void raw_safe_halt(void)
+{
+	__asm__ __volatile__("sti; hlt" : : : "memory");
+}
+
+/*
+ * Used when interrupts are already enabled or to
+ * shutdown the processor:
+ */
+static inline void halt(void)
+{
+	__asm__ __volatile__("hlt": : :"memory");
+}
 
 #else /* __ASSEMBLY__: */
-# define TRACE_IRQS_ON
-# define TRACE_IRQS_OFF
+# ifdef CONFIG_TRACE_IRQFLAGS
+#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk
+#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk
+# else
+#  define TRACE_IRQS_ON
+#  define TRACE_IRQS_OFF
+# endif
 #endif
 
 #endif