Diffstat (limited to 'include/linux/preempt.h')
-rw-r--r--  include/linux/preempt.h | 123
1 file changed, 121 insertions(+), 2 deletions(-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index de83b4eb1642..a1a00e14c14f 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -10,13 +10,117 @@
 #include <linux/list.h>
 
 /*
- * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
- * the other bits -- can't include that header due to inclusion hell.
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count could in theory be the same as the number of
+ * interrupts in the system, but we run all interrupt handlers with
+ * interrupts disabled, so we cannot have nesting interrupts. Though
+ * there are a few palaeontologic drivers which reenable interrupts in
+ * the handler, so we need more than one bit here.
+ *
+ * PREEMPT_MASK:         0x000000ff
+ * SOFTIRQ_MASK:         0x0000ff00
+ * HARDIRQ_MASK:         0x000f0000
+ * NMI_MASK:             0x00100000
+ * PREEMPT_ACTIVE:       0x00200000
+ * PREEMPT_NEED_RESCHED: 0x80000000
  */
+#define PREEMPT_BITS	8
+#define SOFTIRQ_BITS	8
+#define HARDIRQ_BITS	4
+#define NMI_BITS	1
+
+#define PREEMPT_SHIFT	0
+#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x)	((1UL << (x)) - 1)
+
+#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
+
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET	(1UL << NMI_SHIFT)
+
+#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
+
+#define PREEMPT_ACTIVE_BITS	1
+#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+
+/* We use the MSB mostly because it's available */
 #define PREEMPT_NEED_RESCHED	0x80000000
 
+/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
 #include <asm/preempt.h>
 
+#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+				 | NMI_MASK))
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ *
+ * in_irq()             - Are we in hardirq context?
+ * in_softirq()         - Are we currently processing softirq or have bh disabled?
+ * in_interrupt()       - Are we in hardirq, softirq or NMI context?
+ * in_serving_softirq() - Are we currently processing softirq?
+ */
+#define in_irq()		(hardirq_count())
+#define in_softirq()		(softirq_count())
+#define in_interrupt()		(irq_count())
+#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi()	(preempt_count() & NMI_MASK)
+
+#if defined(CONFIG_PREEMPT_COUNT)
+# define PREEMPT_DISABLE_OFFSET 1
+#else
+# define PREEMPT_DISABLE_OFFSET 0
+#endif
+
+/*
+ * The preempt_count offset needed for things like:
+ *
+ *  spin_lock_bh()
+ *
+ * which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
+ * softirqs, such that unlock sequences of:
+ *
+ *  spin_unlock();
+ *  local_bh_enable();
+ *
+ * work as expected.
+ */
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+
+/*
+ * Are we running in atomic context? WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels. Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic()	(preempt_count() != 0)
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler)
+ */
+#define in_atomic_preempt_off() \
+		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
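To make the bit packing in this hunk concrete, here is a small standalone C program (my own illustration, not part of the patch and not kernel code) that copies the shift and mask definitions above and decodes a made-up preempt_count value the way hardirq_count(), softirq_count() and the context predicates do; in the kernel these fields live in a per-task/per-cpu counter, not a local variable:

#include <stdio.h>

#define PREEMPT_BITS   8
#define SOFTIRQ_BITS   8
#define HARDIRQ_BITS   4
#define NMI_BITS       1

#define PREEMPT_SHIFT  0
#define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)   /*  8 */
#define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)   /* 16 */
#define NMI_SHIFT      (HARDIRQ_SHIFT + HARDIRQ_BITS)   /* 20 */

#define __IRQ_MASK(x)  ((1UL << (x)) - 1)
#define PREEMPT_MASK   (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) /* 0x000000ff */
#define SOFTIRQ_MASK   (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) /* 0x0000ff00 */
#define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) /* 0x000f0000 */
#define NMI_MASK       (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)         /* 0x00100000 */

#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)

int main(void)
{
	/* Made-up sample: preempt depth 2, serving one softirq, one hardirq. */
	unsigned long pc = 2 | (1UL << SOFTIRQ_SHIFT) | (1UL << HARDIRQ_SHIFT);

	printf("pc                 = 0x%08lx\n", pc);  /* 0x00010102 */
	printf("preempt depth      = %lu\n", (pc & PREEMPT_MASK) >> PREEMPT_SHIFT);
	printf("softirq count      = %lu\n", (pc & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT);
	printf("hardirq count      = %lu\n", (pc & HARDIRQ_MASK) >> HARDIRQ_SHIFT);

	/* The predicates from this hunk, applied to the local word: */
	printf("in_interrupt       = %d\n",
	       (pc & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK)) != 0);
	printf("in_serving_softirq = %d\n", (pc & SOFTIRQ_OFFSET) != 0);
	printf("in_atomic          = %d\n", pc != 0);
	return 0;
}

Note the two softirq granularities the hunk defines: servicing raises the field in units of SOFTIRQ_OFFSET (0x100), while bh-disable accounts in units of SOFTIRQ_DISABLE_OFFSET (0x200), which is consistent with in_serving_softirq() testing just the low softirq bit while in_softirq() tests the whole field.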
@@ -33,6 +137,18 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#define preempt_active_enter() \
+do { \
+	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+	barrier(); \
+} while (0)
+
+#define preempt_active_exit() \
+do { \
+	barrier(); \
+	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
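The preempt_active_enter()/preempt_active_exit() pair added above moves the counter by PREEMPT_ACTIVE plus one preemption level in a single step, with a compiler barrier() ordering the update against the code inside. A minimal userspace sketch of just that arithmetic (illustration only; it assumes CONFIG_PREEMPT_COUNT, so PREEMPT_DISABLE_OFFSET is 1, and uses the 0x00200000 PREEMPT_ACTIVE value from the layout comment in the first hunk):

#include <assert.h>
#include <stdio.h>

#define PREEMPT_ACTIVE          (1UL << 21)     /* 0x00200000, bit above NMI */
#define PREEMPT_DISABLE_OFFSET  1UL             /* CONFIG_PREEMPT_COUNT=y */

int main(void)
{
	unsigned long count = 0;    /* task was fully preemptible */

	/* preempt_active_enter(): one combined add, so there is no window
	 * where PREEMPT_ACTIVE is set but preemption is still enabled. */
	count += PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET;
	printf("inside: 0x%08lx\n", count);      /* 0x00200001 */

	/* in_atomic_preempt_off() masks PREEMPT_ACTIVE out, so the
	 * scheduler still sees "exactly one preempt_disable() deep": */
	assert((count & ~PREEMPT_ACTIVE) == PREEMPT_DISABLE_OFFSET);

	/* preempt_active_exit(): the matching combined subtract. */
	count -= PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET;
	assert(count == 0);
	return 0;
}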
@@ -49,6 +165,8 @@ do { \
 
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
+#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
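preemptible() as added here is true only when both conditions hold: the preemption counter is zero and local interrupts are enabled (the !CONFIG_PREEMPT_COUNT stub in the last hunk simply defines it to 0). A tiny userspace mock of the predicate (illustration only; the mock_* names are invented stand-ins for kernel state):

#include <stdbool.h>
#include <stdio.h>

static unsigned long mock_preempt_count;
static bool mock_irqs_disabled;

static bool preemptible_mock(void)
{
	return mock_preempt_count == 0 && !mock_irqs_disabled;
}

int main(void)
{
	printf("%d\n", preemptible_mock());     /* 1: nothing held */

	mock_preempt_count += 1;                /* like preempt_disable() */
	printf("%d\n", preemptible_mock());     /* 0: preemption off */
	mock_preempt_count -= 1;                /* like preempt_enable() */

	mock_irqs_disabled = true;              /* like local_irq_disable() */
	printf("%d\n", preemptible_mock());     /* 0: IRQs off */
	return 0;
}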
@@ -121,6 +239,7 @@ do { \
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
+#define preemptible()				0
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
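Finally, the SOFTIRQ_LOCK_OFFSET comment in the first hunk promises that a spin_lock_bh() section can be unwound piecewise by spin_unlock() followed by local_bh_enable(). A sketch of just that bookkeeping (illustration only, plain arithmetic rather than the real locking code; assumes CONFIG_PREEMPT_COUNT so PREEMPT_DISABLE_OFFSET is 1):

#include <assert.h>

#define SOFTIRQ_OFFSET          (1UL << 8)
#define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)
#define PREEMPT_DISABLE_OFFSET  1UL
#define SOFTIRQ_LOCK_OFFSET     (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)

int main(void)
{
	unsigned long count = 0;

	/* spin_lock_bh(): disable softirqs and preemption in one add. */
	count += SOFTIRQ_LOCK_OFFSET;           /* 0x201 */

	/* ... critical section ... */

	count -= PREEMPT_DISABLE_OFFSET;        /* spin_unlock() */
	count -= SOFTIRQ_DISABLE_OFFSET;        /* local_bh_enable() */

	assert(count == 0);                     /* the books balance */
	return 0;
}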