Diffstat (limited to 'include/linux/hardirq.h')
-rw-r--r--  include/linux/hardirq.h  75
1 file changed, 45 insertions, 30 deletions
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dda..45257475623c 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,55 +15,61 @@
  * - bits 0-7 are the preemption count (max preemption depth: 256)
  * - bits 8-15 are the softirq count (max # of softirqs: 256)
  *
- * The hardirq count can be overridden per architecture, the default is:
+ * The hardirq count can in theory reach the same as NR_IRQS.
+ * In reality, the number of nested IRQS is limited to the stack
+ * size as well. For archs with over 1000 IRQS it is not practical
+ * to expect that they will all nest. We give a max of 10 bits for
+ * hardirq nesting. An arch may choose to give less than 10 bits.
+ * m68k expects it to be 8.
  *
- * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
- * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
+ * - bit 26 is the NMI_MASK
+ * - bit 28 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x0fff0000
+ * HARDIRQ_MASK: 0x03ff0000
+ *     NMI_MASK: 0x04000000
  */
 #define PREEMPT_BITS 8
 #define SOFTIRQ_BITS 8
+#define NMI_BITS 1
 
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS 12
+#define MAX_HARDIRQ_BITS 10
 
-#ifndef MAX_HARDIRQS_PER_CPU
-#define MAX_HARDIRQS_PER_CPU NR_IRQS
+#ifndef HARDIRQ_BITS
+# define HARDIRQ_BITS MAX_HARDIRQ_BITS
 #endif
 
-/*
- * The hardirq mask has to be large enough to have space for potentially
- * all IRQ sources in the system nesting on a single CPU.
- */
-#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
-# error HARDIRQ_BITS is too low!
-#endif
+#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
+#error HARDIRQ_BITS too high!
 #endif
 
 #define PREEMPT_SHIFT 0
 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
 #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
 
 #define __IRQ_MASK(x) ((1UL << (x))-1)
 
 #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
 #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
 #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
 
 #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
 #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET (1UL << NMI_SHIFT)
 
-#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
 #error PREEMPT_ACTIVE is too low!
 #endif
 
 #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
 #define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+				 | NMI_MASK))
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
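
The mask arithmetic in this hunk is easy to sanity-check outside the kernel. Below is a stand-alone sketch (editorial illustration, not part of the patch) that recomputes the four masks with the new bit widths; it prints exactly the values quoted in the updated comment block (0x000000ff, 0x0000ff00, 0x03ff0000, 0x04000000).

/* Stand-alone user-space sketch: recompute the new preempt_count
 * field masks. Mirrors the header's definitions; illustration only. */
#include <stdio.h>

#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 10			/* was 12 before this patch */
#define NMI_BITS     1

#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)	/* 8 */
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)	/* 16 */
#define NMI_SHIFT     (HARDIRQ_SHIFT + HARDIRQ_BITS)	/* 26 */

#define __IRQ_MASK(x) ((1UL << (x)) - 1)

int main(void)
{
	printf("PREEMPT_MASK: 0x%08lx\n", __IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT);
	printf("SOFTIRQ_MASK: 0x%08lx\n", __IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT);
	printf("HARDIRQ_MASK: 0x%08lx\n", __IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT);
	printf("    NMI_MASK: 0x%08lx\n", __IRQ_MASK(NMI_BITS) << NMI_SHIFT);
	return 0;
}
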
@@ -73,6 +79,11 @@
 #define in_softirq() (softirq_count())
 #define in_interrupt() (irq_count())
 
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi() (preempt_count() & NMI_MASK)
+
 #if defined(CONFIG_PREEMPT)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
 # define PREEMPT_CHECK_OFFSET 1
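
The new in_nmi() predicate reads like the existing in_irq()/in_softirq() helpers. A minimal illustrative sketch of a caller (the function is hypothetical, not from the patch):

/* Hypothetical caller: avoid sleeping or taking regular spinlocks
 * when invoked from NMI context. */
static void my_record_sample(void)
{
	if (in_nmi())
		return;	/* NMI context: defer to a safe context instead */

	/* ... normal path that may take locks or sleep ... */
}
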
@@ -105,7 +116,7 @@
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
 extern void synchronize_irq(unsigned int irq);
 #else
 # define synchronize_irq(irq) barrier()
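
With this hunk, synchronize_irq() is a real function whenever the generic hardirq layer is built in, not only on SMP; elsewhere it still degrades to barrier(). The usual caller pattern is unchanged. A hedged sketch (driver and field names hypothetical):

/* Hypothetical teardown path: quiesce the device, then wait out any
 * handler instance still running on another CPU before freeing state. */
static void my_dev_stop(struct my_dev *dev)
{
	dev->stopping = true;		/* the IRQ handler checks this flag */
	synchronize_irq(dev->irq);	/* returns once in-flight handlers finish */
	free_irq(dev->irq, dev);
}
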
@@ -164,20 +175,24 @@ extern void irq_enter(void);
  */
 extern void irq_exit(void);
 
-#define nmi_enter()		\
-	do {			\
-		ftrace_nmi_enter();	\
-		lockdep_off();		\
-		rcu_nmi_enter();	\
-		__irq_enter();		\
+#define nmi_enter()					\
+	do {						\
+		ftrace_nmi_enter();			\
+		BUG_ON(in_nmi());			\
+		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		lockdep_off();				\
+		rcu_nmi_enter();			\
+		trace_hardirq_enter();			\
 	} while (0)
 
-#define nmi_exit()		\
-	do {			\
-		__irq_exit();	\
-		rcu_nmi_exit();		\
-		lockdep_on();		\
-		ftrace_nmi_exit();	\
+#define nmi_exit()					\
+	do {						\
+		trace_hardirq_exit();			\
+		rcu_nmi_exit();				\
+		lockdep_on();				\
+		BUG_ON(!in_nmi());			\
+		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		ftrace_nmi_exit();			\
 	} while (0)
 
 #endif /* LINUX_HARDIRQ_H */
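
The rewritten macros are meant to bracket an architecture's NMI handler the way irq_enter()/irq_exit() bracket a normal interrupt. A sketch of the intended shape (the handler body is a placeholder, modeled loosely on x86's do_nmi()):

/* Illustrative NMI handler skeleton, not taken from the patch. */
void do_nmi(struct pt_regs *regs)
{
	nmi_enter();	/* adds NMI_OFFSET + HARDIRQ_OFFSET to preempt_count */

	/* handle the NMI; in_nmi() and in_interrupt() are true here */

	nmi_exit();	/* BUG_ON(!in_nmi()) catches an unbalanced exit */
}

Because NMI_BITS is 1, the BUG_ON(in_nmi()) in nmi_enter() doubles as a check that NMIs never nest, and adding HARDIRQ_OFFSET alongside NMI_OFFSET preserves the in_interrupt() behavior that the old __irq_enter() call provided.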