author	Frederic Weisbecker <fweisbec@gmail.com>	2013-07-29 14:29:43 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2013-08-14 11:14:52 -0400
commit	2d4b84739f0acf61f37267b4e80c5bd85fb90a7e (patch)
tree	cf67a3d29aad9609f63ab1a6e2e54e68bbd7e9f0
parent	e7358b3bc0d7fcec0e7a724477f06db2b0c68e21 (diff)
hardirq: Split preempt count mask definitions
In order to use static keys with the vtime APIs, we'll need to add static keys headers to vtime.h.

hardirq.h then becomes a problem because it needs vtime.h for irqtime accounting in irq_enter/irq_exit, but it is often included just to get the irq mask definitions in the task preempt_count field and the APIs that come with them: in_interrupt(), in_hardirq(), etc...

Some very low-level arch headers, such as arch/m68k/include/asm/irqflags.h, sometimes need these masks and APIs. But they don't want to include hardirq.h if vtime.h, jump_label.h and even workqueue.h come along. Including such a bloated high-level header from arch headers can quickly result in circular header dependencies that break the build.

So let's split hardirq.h in two parts:

* preempt_mask.h, which gathers all the preempt_count definitions and the associated APIs. This one is considered low level and can be safely included anywhere.

* hardirq.h, which includes the previous one. It defines the irq entry/exit APIs.

To avoid future circular header dependencies, the preempt_mask.h inclusion can replace hardirq.h in files that don't implement low-level irq handlers but just need the atomic/context check APIs.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Kevin Hilman <khilman@linaro.org>
-rw-r--r--	include/linux/hardirq.h	| 117
-rw-r--r--	include/linux/preempt_mask.h	| 122
2 files changed, 123 insertions(+), 116 deletions(-)
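To make the intended include swap concrete, here is a minimal sketch (an illustration, not part of the patch; the helper name is made up) of a file that only needs the context-check APIs and can therefore include the lightweight header:

	/* Illustrative consumer -- example_may_sleep() is hypothetical. */
	#include <linux/preempt_mask.h>	/* was: #include <linux/hardirq.h> */

	static inline int example_may_sleep(void)
	{
		/*
		 * in_interrupt() and the preempt_count masks now come from
		 * preempt_mask.h, without pulling vtime.h, jump_label.h or
		 * workqueue.h in through hardirq.h.
		 */
		return !in_interrupt();
	}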
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 05bcc0903766..ccfe17c5c8da 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -1,126 +1,11 @@
 #ifndef LINUX_HARDIRQ_H
 #define LINUX_HARDIRQ_H
 
-#include <linux/preempt.h>
+#include <linux/preempt_mask.h>
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
 #include <linux/vtime.h>
-#include <asm/hardirq.h>
 
-/*
- * We put the hardirq and softirq counter into the preemption
- * counter. The bitmask has the following meaning:
- *
- * - bits 0-7 are the preemption count (max preemption depth: 256)
- * - bits 8-15 are the softirq count (max # of softirqs: 256)
- *
- * The hardirq count can in theory reach the same as NR_IRQS.
- * In reality, the number of nested IRQS is limited to the stack
- * size as well. For archs with over 1000 IRQS it is not practical
- * to expect that they will all nest. We give a max of 10 bits for
- * hardirq nesting. An arch may choose to give less than 10 bits.
- * m68k expects it to be 8.
- *
- * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
- * - bit 26 is the NMI_MASK
- * - bit 27 is the PREEMPT_ACTIVE flag
- *
- * PREEMPT_MASK: 0x000000ff
- * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x03ff0000
- *     NMI_MASK: 0x04000000
- */
-#define PREEMPT_BITS	8
-#define SOFTIRQ_BITS	8
-#define NMI_BITS	1
-
-#define MAX_HARDIRQ_BITS 10
-
-#ifndef HARDIRQ_BITS
-# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
-#endif
-
-#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
-#error HARDIRQ_BITS too high!
-#endif
-
-#define PREEMPT_SHIFT	0
-#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)
-
-#define __IRQ_MASK(x)	((1UL << (x))-1)
-
-#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
-#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
-#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
-
-#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
-#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
-#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
-#define NMI_OFFSET	(1UL << NMI_SHIFT)
-
-#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
-
-#ifndef PREEMPT_ACTIVE
-#define PREEMPT_ACTIVE_BITS	1
-#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
-#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
-#endif
-
-#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
-#error PREEMPT_ACTIVE is too low!
-#endif
-
-#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
-#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
-#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
-				 | NMI_MASK))
-
-/*
- * Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
- */
-#define in_irq()		(hardirq_count())
-#define in_softirq()		(softirq_count())
-#define in_interrupt()		(irq_count())
-#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi()	(preempt_count() & NMI_MASK)
-
-#if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_CHECK_OFFSET 1
-#else
-# define PREEMPT_CHECK_OFFSET 0
-#endif
-
-/*
- * Are we running in atomic context? WARNING: this macro cannot
- * always detect atomic context; in particular, it cannot know about
- * held spinlocks in non-preemptible kernels. Thus it should not be
- * used in the general case to determine whether sleeping is possible.
- * Do not use in_atomic() in driver code.
- */
-#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-
-/*
- * Check whether we were atomic before we did preempt_disable():
- * (used by the scheduler, *after* releasing the kernel lock)
- */
-#define in_atomic_preempt_off() \
-		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
-
-#ifdef CONFIG_PREEMPT_COUNT
-# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
-#else
-# define preemptible()	0
-#endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
 extern void synchronize_irq(unsigned int irq);
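By contrast, a file that actually implements an irq entry path keeps including the full hardirq.h, which after this patch carries only the irq entry/exit APIs on top of preempt_mask.h. A rough sketch (hypothetical arch function, dispatch elided):

	#include <linux/hardirq.h>	/* irq_enter()/irq_exit() live here */

	/* Hypothetical outline of an arch-level interrupt entry path. */
	void example_arch_do_IRQ(unsigned int irq)
	{
		irq_enter();	/* marks hardirq context in preempt_count */
		/* ... dispatch the handler for this irq ... */
		irq_exit();	/* leaves hardirq context, may run softirqs */
	}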
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
new file mode 100644
index 000000000000..931bc616219f
--- /dev/null
+++ b/include/linux/preempt_mask.h
@@ -0,0 +1,122 @@
+#ifndef LINUX_PREEMPT_MASK_H
+#define LINUX_PREEMPT_MASK_H
+
+#include <linux/preempt.h>
+#include <asm/hardirq.h>
+
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count can in theory reach the same as NR_IRQS.
+ * In reality, the number of nested IRQS is limited to the stack
+ * size as well. For archs with over 1000 IRQS it is not practical
+ * to expect that they will all nest. We give a max of 10 bits for
+ * hardirq nesting. An arch may choose to give less than 10 bits.
+ * m68k expects it to be 8.
+ *
+ * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
+ * - bit 26 is the NMI_MASK
+ * - bit 27 is the PREEMPT_ACTIVE flag
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x03ff0000
+ *     NMI_MASK: 0x04000000
+ */
+#define PREEMPT_BITS	8
+#define SOFTIRQ_BITS	8
+#define NMI_BITS	1
+
+#define MAX_HARDIRQ_BITS 10
+
+#ifndef HARDIRQ_BITS
+# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
+#endif
+
+#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
+#error HARDIRQ_BITS too high!
+#endif
+
+#define PREEMPT_SHIFT	0
+#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x)	((1UL << (x))-1)
+
+#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
+
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET	(1UL << NMI_SHIFT)
+
+#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
+
+#ifndef PREEMPT_ACTIVE
+#define PREEMPT_ACTIVE_BITS	1
+#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+#endif
+
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
+#error PREEMPT_ACTIVE is too low!
+#endif
+
+#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+				 | NMI_MASK))
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
+ */
+#define in_irq()		(hardirq_count())
+#define in_softirq()		(softirq_count())
+#define in_interrupt()		(irq_count())
+#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi()	(preempt_count() & NMI_MASK)
+
+#if defined(CONFIG_PREEMPT_COUNT)
+# define PREEMPT_CHECK_OFFSET 1
+#else
+# define PREEMPT_CHECK_OFFSET 0
+#endif
+
+/*
+ * Are we running in atomic context? WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels. Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler, *after* releasing the kernel lock)
+ */
+#define in_atomic_preempt_off() \
+		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+
+#ifdef CONFIG_PREEMPT_COUNT
+# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+#else
+# define preemptible()	0
+#endif
+
+#endif /* LINUX_PREEMPT_MASK_H */
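As a sanity check on the bit layout documented in the new header's comment, the following self-contained userspace program (an editor's sketch, not part of the patch) mirrors the shift/mask arithmetic with HARDIRQ_BITS at its maximum of 10 and prints the documented mask values:

	#include <stdio.h>

	/* Userspace mirror of the preempt_mask.h definitions above. */
	#define PREEMPT_BITS	8
	#define SOFTIRQ_BITS	8
	#define HARDIRQ_BITS	10
	#define NMI_BITS	1

	#define PREEMPT_SHIFT	0
	#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
	#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
	#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

	#define __IRQ_MASK(x)	((1UL << (x)) - 1)

	#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
	#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
	#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
	#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

	int main(void)
	{
		/* Sample count: preempt depth 2, one softirq, one hardirq. */
		unsigned long count = (2UL << PREEMPT_SHIFT) |
				      (1UL << SOFTIRQ_SHIFT) |
				      (1UL << HARDIRQ_SHIFT);

		printf("PREEMPT_MASK: 0x%08lx\n", PREEMPT_MASK); /* 0x000000ff */
		printf("SOFTIRQ_MASK: 0x%08lx\n", SOFTIRQ_MASK); /* 0x0000ff00 */
		printf("HARDIRQ_MASK: 0x%08lx\n", HARDIRQ_MASK); /* 0x03ff0000 */
		printf("    NMI_MASK: 0x%08lx\n", NMI_MASK);     /* 0x04000000 */

		/* The in_*() macros are exactly these mask tests. */
		printf("in_interrupt: %s\n",
		       (count & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK)) ?
		       "yes" : "no");
		return 0;
	}

Built with any C compiler, this prints the four mask constants from the comment block, confirming that the shifts partition the preempt_count word as described.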