author    Frederic Weisbecker <fweisbec@gmail.com>	2015-05-12 10:41:47 -0400
committer Ingo Molnar <mingo@kernel.org>	2015-05-19 02:39:12 -0400
commit    2e10e71ce88e3eaccfd09a045ae6ecebe657ba09 (patch)
tree      e471d5427053c300748fb9b6c4263a8ba8087015 /include/linux/preempt.h
parent    92cf211874e954027b8e91cc9a15485a50b58d6b (diff)
sched/preempt: Rearrange a few symbols after headers merge
Adjust a few comments, and further integrate a few definitions after
the dumb headers copy.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431441711-29753-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/preempt.h')
-rw-r--r--	include/linux/preempt.h	34
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 8cc0338a5e9a..37974cd4f092 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -10,14 +10,6 @@
 #include <linux/list.h>
 
 /*
- * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
- * the other bits -- can't include that header due to inclusion hell.
- */
-#define PREEMPT_NEED_RESCHED	0x80000000
-
-#include <asm/preempt.h>
-
-/*
  * We put the hardirq and softirq counter into the preemption
  * counter. The bitmask has the following meaning:
  *
@@ -30,11 +22,12 @@
  * there are a few palaeontologic drivers which reenable interrupts in
  * the handler, so we need more than one bit here.
  *
- * PREEMPT_MASK:	0x000000ff
- * SOFTIRQ_MASK:	0x0000ff00
- * HARDIRQ_MASK:	0x000f0000
- *     NMI_MASK:	0x00100000
- * PREEMPT_ACTIVE:	0x00200000
+ *         PREEMPT_MASK:	0x000000ff
+ *         SOFTIRQ_MASK:	0x0000ff00
+ *         HARDIRQ_MASK:	0x000f0000
+ *             NMI_MASK:	0x00100000
+ *       PREEMPT_ACTIVE:	0x00200000
+ * PREEMPT_NEED_RESCHED:	0x80000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
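The mask table in the comment above documents how the single preempt_count word is partitioned. A minimal userspace sketch of that decoding, using the mask values quoted in the hunk and shifts implied by PREEMPT_BITS/SOFTIRQ_BITS being 8 each; the decode() helper and sample values are illustrative only, not kernel API:

#include <stdio.h>

/* Mask values copied from the comment in the hunk above; shifts follow
 * from PREEMPT_BITS/SOFTIRQ_BITS being 8 each. Illustrative only. */
#define PREEMPT_MASK		0x000000ffU
#define SOFTIRQ_MASK		0x0000ff00U
#define HARDIRQ_MASK		0x000f0000U
#define NMI_MASK		0x00100000U
#define PREEMPT_NEED_RESCHED	0x80000000U

#define SOFTIRQ_SHIFT	8
#define HARDIRQ_SHIFT	16

/* Hypothetical helper: show how one count value splits into fields. */
static void decode(unsigned int count)
{
	printf("count=0x%08x preempt=%u softirq=%u hardirq=%u nmi=%u msb=%u\n",
	       count,
	       count & PREEMPT_MASK,
	       (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT,
	       (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT,
	       !!(count & NMI_MASK),
	       !!(count & PREEMPT_NEED_RESCHED));
}

int main(void)
{
	decode(0x00000001);	/* one preempt_disable() level */
	decode(0x00010100);	/* hardirq nested over a softirq */
	decode(0x80000000);	/* only the PREEMPT_NEED_RESCHED bit set */
	return 0;
}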
@@ -64,6 +57,12 @@
 #define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
 #define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
 
+/* We use the MSB mostly because its available */
+#define PREEMPT_NEED_RESCHED	0x80000000
+
+/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
+#include <asm/preempt.h>
+
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
 #define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
 #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
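The new placement is deliberate: PREEMPT_NEED_RESCHED now sits after the other preempt_count masks but still before <asm/preempt.h>, since an architecture's preempt_count() helpers may refer to that bit. A rough, self-contained sketch of that dependency, with mock_* names standing in for whatever the arch header provides (not the actual x86 or asm-generic code):

#include <stdio.h>

/* Stand-in for the constant that linux/preempt.h now defines up front. */
#define PREEMPT_NEED_RESCHED	0x80000000U

/*
 * Stand-in for the kind of inline helper an arch <asm/preempt.h> could
 * provide; the names and storage here are invented for illustration.
 */
static unsigned int mock_preempt_count = PREEMPT_NEED_RESCHED;

static inline int mock_resched_bit_set(void)
{
	/* Only works if PREEMPT_NEED_RESCHED is already visible, which is
	 * why the #define has to precede the #include in the header. */
	return !!(mock_preempt_count & PREEMPT_NEED_RESCHED);
}

int main(void)
{
	printf("resched bit set: %d\n", mock_resched_bit_set());
	return 0;
}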
@@ -122,12 +121,6 @@
 #define in_atomic_preempt_off() \
 	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
 
-#ifdef CONFIG_PREEMPT_COUNT
-# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
-#else
-# define preemptible()	0
-#endif
-
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
@@ -160,6 +153,8 @@ do { \
 
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
+#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
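With CONFIG_PREEMPT_COUNT enabled, the relocated preemptible() test means "no preemption disables are in effect and interrupts are enabled". A hedged userspace sketch of a guard built the same way; the mock_* stand-ins are invented, this is not the kernel implementation:

#include <stdio.h>
#include <stdbool.h>

/* Invented stand-ins for preempt_count() and irqs_disabled(). */
static unsigned int mock_preempt_count;
static bool mock_irqs_disabled;

/* Same shape as the relocated macro:
 * preemptible() := preempt_count() == 0 && !irqs_disabled() */
static bool mock_preemptible(void)
{
	return mock_preempt_count == 0 && !mock_irqs_disabled;
}

int main(void)
{
	printf("%d\n", mock_preemptible());	/* 1: nothing disabled */
	mock_preempt_count = 1;			/* as after a preempt_disable() */
	printf("%d\n", mock_preemptible());	/* 0: not preemptible */
	return 0;
}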
@@ -232,6 +227,7 @@ do { \
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
+#define preemptible()				0
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
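On !CONFIG_PREEMPT_COUNT builds, the stub added in this last hunk makes preemptible() a compile-time 0, so branches guarded by it can be discarded. A small sketch of that effect, where MOCK_PREEMPT_COUNT is only a placeholder for the real Kconfig symbol:

#include <stdio.h>

/* MOCK_PREEMPT_COUNT is only a placeholder for CONFIG_PREEMPT_COUNT. */
#ifdef MOCK_PREEMPT_COUNT
static unsigned int mock_count;			/* stand-in for preempt_count() */
# define mock_preemptible()	(mock_count == 0)
#else
# define mock_preemptible()	0		/* same shape as the stub above */
#endif

int main(void)
{
	if (mock_preemptible())			/* constant 0 without the option, */
		printf("preemptible path\n");	/* so this branch can be dropped  */
	else
		printf("non-preemptible path\n");
	return 0;
}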