about summary refs log tree commit diff stats
path: root/include/asm-generic
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2013-09-10 06:15:23 -0400
committerIngo Molnar <mingo@kernel.org>2013-09-25 08:07:54 -0400
commitbdb43806589096ac4272fe1307e789846ac08d7c (patch)
treec854e7e508193766d5cbdd82e8709cfab5ea3be5 /include/asm-generic
parent01028747559ac6c6f642a7bbd2875cc4f66b2feb (diff)
sched: Extract the basic add/sub preempt_count modifiers
Rewrite the preempt_count macros in order to extract the 3 basic preempt_count value modifiers: __preempt_count_add() __preempt_count_sub() and the new: __preempt_count_dec_and_test() And since we're at it anyway, replace the unconventional $op_preempt_count names with the more conventional preempt_count_$op. Since these basic operators are equivalent to the previous _notrace() variants, do away with the _notrace() versions. Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/n/tip-ewbpdbupy9xpsjhg960zwbv8@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/asm-generic')
-rw-r--r--include/asm-generic/preempt.h35
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 8100b1ec1715..82d958fc3823 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -65,4 +65,39 @@ static __always_inline bool test_preempt_need_resched(void)
65	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
66}
67
68/*
69 * The various preempt_count add/sub methods
70 */
71
72static __always_inline void __preempt_count_add(int val)
73{
74 *preempt_count_ptr() += val;
75}
76
77static __always_inline void __preempt_count_sub(int val)
78{
79 *preempt_count_ptr() -= val;
80}
81
82static __always_inline bool __preempt_count_dec_and_test(void)
83{
84 return !--*preempt_count_ptr();
85}
86
87/*
88 * Returns true when we need to resched -- even if we can not.
89 */
90static __always_inline bool need_resched(void)
91{
92 return unlikely(test_preempt_need_resched());
93}
94
95/*
96 * Returns true when we need to resched and can (barring IRQ state).
97 */
98static __always_inline bool should_resched(void)
99{
100 return unlikely(!*preempt_count_ptr());
101}
102
103#endif /* __ASM_PREEMPT_H */