aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/preempt.h
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2013-09-10 06:15:23 -0400
committerIngo Molnar <mingo@kernel.org>2013-09-25 08:07:54 -0400
commitbdb43806589096ac4272fe1307e789846ac08d7c (patch)
treec854e7e508193766d5cbdd82e8709cfab5ea3be5 /include/linux/preempt.h
parent01028747559ac6c6f642a7bbd2875cc4f66b2feb (diff)
sched: Extract the basic add/sub preempt_count modifiers
Rewrite the preempt_count macros in order to extract the 3 basic preempt_count value modifiers: __preempt_count_add() __preempt_count_sub() and the new: __preempt_count_dec_and_test() And since we're at it anyway, replace the unconventional $op_preempt_count names with the more conventional preempt_count_$op. Since these basic operators are equivalent to the previous _notrace() variants, do away with the _notrace() versions. Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/n/tip-ewbpdbupy9xpsjhg960zwbv8@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/preempt.h')
-rw-r--r--include/linux/preempt.h106
1 file changed, 48 insertions, 58 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index df8e245e8729..2343d8715299 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -18,97 +18,86 @@
18#include <asm/preempt.h> 18#include <asm/preempt.h>
19 19
20#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) 20#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
21 extern void add_preempt_count(int val); 21extern void preempt_count_add(int val);
22 extern void sub_preempt_count(int val); 22extern void preempt_count_sub(int val);
23#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
23#else 24#else
24# define add_preempt_count(val) do { *preempt_count_ptr() += (val); } while (0) 25#define preempt_count_add(val) __preempt_count_add(val)
25# define sub_preempt_count(val) do { *preempt_count_ptr() -= (val); } while (0) 26#define preempt_count_sub(val) __preempt_count_sub(val)
27#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
26#endif 28#endif
27 29
28#define inc_preempt_count() add_preempt_count(1) 30#define __preempt_count_inc() __preempt_count_add(1)
29#define dec_preempt_count() sub_preempt_count(1) 31#define __preempt_count_dec() __preempt_count_sub(1)
30
31#ifdef CONFIG_PREEMPT
32
33asmlinkage void preempt_schedule(void);
34
35#define preempt_check_resched() \
36do { \
37 if (unlikely(!*preempt_count_ptr())) \
38 preempt_schedule(); \
39} while (0)
40
41#ifdef CONFIG_CONTEXT_TRACKING
42
43void preempt_schedule_context(void);
44
45#define preempt_check_resched_context() \
46do { \
47 if (unlikely(!*preempt_count_ptr())) \
48 preempt_schedule_context(); \
49} while (0)
50#else
51
52#define preempt_check_resched_context() preempt_check_resched()
53
54#endif /* CONFIG_CONTEXT_TRACKING */
55
56#else /* !CONFIG_PREEMPT */
57
58#define preempt_check_resched() do { } while (0)
59#define preempt_check_resched_context() do { } while (0)
60
61#endif /* CONFIG_PREEMPT */
62 32
33#define preempt_count_inc() preempt_count_add(1)
34#define preempt_count_dec() preempt_count_sub(1)
63 35
64#ifdef CONFIG_PREEMPT_COUNT 36#ifdef CONFIG_PREEMPT_COUNT
65 37
66#define preempt_disable() \ 38#define preempt_disable() \
67do { \ 39do { \
68 inc_preempt_count(); \ 40 preempt_count_inc(); \
69 barrier(); \ 41 barrier(); \
70} while (0) 42} while (0)
71 43
72#define sched_preempt_enable_no_resched() \ 44#define sched_preempt_enable_no_resched() \
73do { \ 45do { \
74 barrier(); \ 46 barrier(); \
75 dec_preempt_count(); \ 47 preempt_count_dec(); \
76} while (0) 48} while (0)
77 49
78#define preempt_enable_no_resched() sched_preempt_enable_no_resched() 50#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
79 51
52#ifdef CONFIG_PREEMPT
53asmlinkage void preempt_schedule(void);
80#define preempt_enable() \ 54#define preempt_enable() \
81do { \ 55do { \
82 preempt_enable_no_resched(); \ 56 barrier(); \
83 preempt_check_resched(); \ 57 if (unlikely(preempt_count_dec_and_test())) \
58 preempt_schedule(); \
84} while (0) 59} while (0)
85 60
86/* For debugging and tracer internals only! */ 61#define preempt_check_resched() \
87#define add_preempt_count_notrace(val) \ 62do { \
88 do { *preempt_count_ptr() += (val); } while (0) 63 if (should_resched()) \
89#define sub_preempt_count_notrace(val) \ 64 preempt_schedule(); \
90 do { *preempt_count_ptr() -= (val); } while (0) 65} while (0)
91#define inc_preempt_count_notrace() add_preempt_count_notrace(1) 66
92#define dec_preempt_count_notrace() sub_preempt_count_notrace(1) 67#else
68#define preempt_enable() preempt_enable_no_resched()
69#define preempt_check_resched() do { } while (0)
70#endif
93 71
94#define preempt_disable_notrace() \ 72#define preempt_disable_notrace() \
95do { \ 73do { \
96 inc_preempt_count_notrace(); \ 74 __preempt_count_inc(); \
97 barrier(); \ 75 barrier(); \
98} while (0) 76} while (0)
99 77
100#define preempt_enable_no_resched_notrace() \ 78#define preempt_enable_no_resched_notrace() \
101do { \ 79do { \
102 barrier(); \ 80 barrier(); \
103 dec_preempt_count_notrace(); \ 81 __preempt_count_dec(); \
104} while (0) 82} while (0)
105 83
106/* preempt_check_resched is OK to trace */ 84#ifdef CONFIG_PREEMPT
85
86#ifdef CONFIG_CONTEXT_TRACKING
87asmlinkage void preempt_schedule_context(void);
88#else
89#define preempt_schedule_context() preempt_schedule()
90#endif
91
107#define preempt_enable_notrace() \ 92#define preempt_enable_notrace() \
108do { \ 93do { \
109 preempt_enable_no_resched_notrace(); \ 94 barrier(); \
110 preempt_check_resched_context(); \ 95 if (unlikely(__preempt_count_dec_and_test())) \
96 preempt_schedule_context(); \
111} while (0) 97} while (0)
98#else
99#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
100#endif
112 101
113#else /* !CONFIG_PREEMPT_COUNT */ 102#else /* !CONFIG_PREEMPT_COUNT */
114 103
@@ -118,10 +107,11 @@ do { \
118 * that can cause faults and scheduling migrate into our preempt-protected 107 * that can cause faults and scheduling migrate into our preempt-protected
119 * region. 108 * region.
120 */ 109 */
121#define preempt_disable() barrier() 110#define preempt_disable() barrier()
122#define sched_preempt_enable_no_resched() barrier() 111#define sched_preempt_enable_no_resched() barrier()
123#define preempt_enable_no_resched() barrier() 112#define preempt_enable_no_resched() barrier()
124#define preempt_enable() barrier() 113#define preempt_enable() barrier()
114#define preempt_check_resched() do { } while (0)
125 115
126#define preempt_disable_notrace() barrier() 116#define preempt_disable_notrace() barrier()
127#define preempt_enable_no_resched_notrace() barrier() 117#define preempt_enable_no_resched_notrace() barrier()