author		Trond Myklebust <Trond.Myklebust@netapp.com>	2006-07-05 13:13:03 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-07-05 13:13:03 -0400
commit		5e66dd6d66ffe758b39b6dcadf2330753ee1159b (patch)
tree		a72cdcff4448e4af9425cc213ddf56ab23e697fe /include/linux/interrupt.h
parent		026477c1141b67e98e3bd8bdedb7d4b88a3ecd09 (diff)
parent		ca78f6baca863afe2e6a244a0fe94b3a70211d46 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Diffstat (limited to 'include/linux/interrupt.h')
-rw-r--r--	include/linux/interrupt.h	77
1 file changed, 70 insertions(+), 7 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index da3e0dbe61d4..d5afee95fd43 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -10,6 +10,7 @@
 #include <linux/irqreturn.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
+#include <linux/irqflags.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -80,12 +81,64 @@ extern int request_irq(unsigned int,
 		       unsigned long, const char *, void *);
 extern void free_irq(unsigned int, void *);
 
+/*
+ * On lockdep we dont want to enable hardirqs in hardirq
+ * context. Use local_irq_enable_in_hardirq() to annotate
+ * kernel code that has to do this nevertheless (pretty much
+ * the only valid case is for old/broken hardware that is
+ * insanely slow).
+ *
+ * NOTE: in theory this might break fragile code that relies
+ * on hardirq delivery - in practice we dont seem to have such
+ * places left. So the only effect should be slightly increased
+ * irqs-off latencies.
+ */
+#ifdef CONFIG_LOCKDEP
+# define local_irq_enable_in_hardirq()	do { } while (0)
+#else
+# define local_irq_enable_in_hardirq()	local_irq_enable()
+#endif
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
 
+/*
+ * Special lockdep variants of irq disabling/enabling.
+ * These should be used for locking constructs that
+ * know that a particular irq context which is disabled,
+ * and which is the only irq-context user of a lock,
+ * that it's safe to take the lock in the irq-disabled
+ * section without disabling hardirqs.
+ *
+ * On !CONFIG_LOCKDEP they are equivalent to the normal
+ * irq disable/enable methods.
+ */
+static inline void disable_irq_nosync_lockdep(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void disable_irq_lockdep(unsigned int irq)
+{
+	disable_irq(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void enable_irq_lockdep(unsigned int irq)
+{
+#ifdef CONFIG_LOCKDEP
+	local_irq_enable();
+#endif
+	enable_irq(irq);
+}
+
 /* IRQ wakeup (PM) control: */
 extern int set_irq_wake(unsigned int irq, unsigned int on);
 
@@ -99,7 +152,19 @@ static inline int disable_irq_wake(unsigned int irq)
 	return set_irq_wake(irq, 0);
 }
 
-#endif
+#else /* !CONFIG_GENERIC_HARDIRQS */
+/*
+ * NOTE: non-genirq architectures, if they want to support the lock
+ * validator need to define the methods below in their asm/irq.h
+ * files, under an #ifdef CONFIG_LOCKDEP section.
+ */
+# ifndef CONFIG_LOCKDEP
+# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
+# define disable_irq_lockdep(irq)		disable_irq(irq)
+# define enable_irq_lockdep(irq)		enable_irq(irq)
+# endif
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
 
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
@@ -135,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x)
 #define save_and_cli(x)	save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
-/* SoftIRQ primitives. */
-#define local_bh_disable() \
-		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
-		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
-
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
 
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes
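To make the intent of the new lockdep helpers concrete, here is a short illustrative sketch, not taken from the commit above, of how a driver whose spinlock is otherwise taken only from the handler of one irq line might use them. Every name in it (my_irq, my_dev_lock, my_dev_sync_state) is hypothetical.

/*
 * Illustrative sketch only; assumes my_dev_lock is taken from process
 * context here and otherwise only from the handler of my_irq.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_dev_lock);	/* hypothetical driver lock */
static unsigned int my_irq;		/* hypothetical irq number */

static void my_dev_sync_state(void)
{
	/*
	 * Disable the device irq; under CONFIG_LOCKDEP this also disables
	 * local hardirqs, so the validator accepts the plain spin_lock()
	 * below even though the lock is also used from irq context.
	 */
	disable_irq_lockdep(my_irq);

	spin_lock(&my_dev_lock);
	/* ... update state normally owned by the irq handler ... */
	spin_unlock(&my_dev_lock);

	enable_irq_lockdep(my_irq);
}

local_irq_enable_in_hardirq() is the mirror-image annotation: on CONFIG_LOCKDEP kernels it compiles to a no-op so hardirq handlers do not re-enable interrupts under the validator, while on all other configurations it remains plain local_irq_enable().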