path: root/include
author		Peter Zijlstra <peterz@infradead.org>	2013-09-10 06:15:23 -0400
committer	Ingo Molnar <mingo@kernel.org>		2013-09-25 08:07:54 -0400
commit		bdb43806589096ac4272fe1307e789846ac08d7c (patch)
tree		c854e7e508193766d5cbdd82e8709cfab5ea3be5 /include
parent		01028747559ac6c6f642a7bbd2875cc4f66b2feb (diff)
sched: Extract the basic add/sub preempt_count modifiers
Rewrite the preempt_count macros in order to extract the 3 basic
preempt_count value modifiers:

  __preempt_count_add()
  __preempt_count_sub()

and the new:

  __preempt_count_dec_and_test()

And since we're at it anyway, replace the unconventional
$op_preempt_count names with the more conventional preempt_count_$op.

Since these basic operators are equivalent to the previous _notrace()
variants, do away with the _notrace() versions.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-ewbpdbupy9xpsjhg960zwbv8@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/preempt.h	 35
-rw-r--r--	include/linux/hardirq.h		  8
-rw-r--r--	include/linux/preempt.h		106
-rw-r--r--	include/linux/sched.h		  5
-rw-r--r--	include/linux/uaccess.h		  8
5 files changed, 89 insertions, 73 deletions
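
Editor's note: as a rough illustration of the three basic modifiers this patch extracts, here is a stand-alone user-space sketch (not kernel code). It models the preempt count as a plain int instead of the per-task *preempt_count_ptr(), and ignores the PREEMPT_NEED_RESCHED bit that the real helpers fold into the count; names mirror the asm-generic helpers added in the first hunk below.

/* Stand-alone sketch only: a plain global int stands in for the
 * per-task preempt_count the kernel helpers actually operate on. */
#include <stdbool.h>
#include <stdio.h>

static int preempt_count;

static inline void __preempt_count_add(int val) { preempt_count += val; }
static inline void __preempt_count_sub(int val) { preempt_count -= val; }

/* Decrement and report "did we hit zero?" in one step; this is what
 * lets preempt_enable() fold its resched test into the decrement. */
static inline bool __preempt_count_dec_and_test(void)
{
	return !--preempt_count;
}

int main(void)
{
	__preempt_count_add(1);		/* preempt_disable()        */
	__preempt_count_add(1);		/* nested preempt_disable() */
	printf("%d\n", __preempt_count_dec_and_test());	/* 0: still disabled   */
	printf("%d\n", __preempt_count_dec_and_test());	/* 1: preemption is ok */
	return 0;
}
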
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 8100b1ec1715..82d958fc3823 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -65,4 +65,39 @@ static __always_inline bool test_preempt_need_resched(void)
 	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
 }
 
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+	*preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+	*preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+	return !--*preempt_count_ptr();
+}
+
+/*
+ * Returns true when we need to resched -- even if we can not.
+ */
+static __always_inline bool need_resched(void)
+{
+	return unlikely(test_preempt_need_resched());
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+	return unlikely(!*preempt_count_ptr());
+}
+
 #endif /* __ASM_PREEMPT_H */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 1e041063b226..d9cf963ac832 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -33,7 +33,7 @@ extern void rcu_nmi_exit(void);
 #define __irq_enter()					\
 	do {						\
 		account_irq_enter_time(current);	\
-		add_preempt_count(HARDIRQ_OFFSET);	\
+		preempt_count_add(HARDIRQ_OFFSET);	\
 		trace_hardirq_enter();			\
 	} while (0)
 
@@ -49,7 +49,7 @@ extern void irq_enter(void);
 	do {						\
 		trace_hardirq_exit();			\
 		account_irq_exit_time(current);		\
-		sub_preempt_count(HARDIRQ_OFFSET);	\
+		preempt_count_sub(HARDIRQ_OFFSET);	\
 	} while (0)
 
 /*
@@ -62,7 +62,7 @@ extern void irq_exit(void);
 		lockdep_off();					\
 		ftrace_nmi_enter();				\
 		BUG_ON(in_nmi());				\
-		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
 		rcu_nmi_enter();				\
 		trace_hardirq_enter();				\
 	} while (0)
@@ -72,7 +72,7 @@ extern void irq_exit(void);
 		trace_hardirq_exit();				\
 		rcu_nmi_exit();					\
 		BUG_ON(!in_nmi());				\
-		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
 		ftrace_nmi_exit();				\
 		lockdep_on();					\
 	} while (0)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index df8e245e8729..2343d8715299 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -18,97 +18,86 @@
 #include <asm/preempt.h>
 
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
-  extern void add_preempt_count(int val);
-  extern void sub_preempt_count(int val);
+extern void preempt_count_add(int val);
+extern void preempt_count_sub(int val);
+#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
 #else
-# define add_preempt_count(val)	do { *preempt_count_ptr() += (val); } while (0)
-# define sub_preempt_count(val)	do { *preempt_count_ptr() -= (val); } while (0)
+#define preempt_count_add(val)	__preempt_count_add(val)
+#define preempt_count_sub(val)	__preempt_count_sub(val)
+#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
 #endif
 
-#define inc_preempt_count() add_preempt_count(1)
-#define dec_preempt_count() sub_preempt_count(1)
-
-#ifdef CONFIG_PREEMPT
-
-asmlinkage void preempt_schedule(void);
-
-#define preempt_check_resched() \
-do { \
-	if (unlikely(!*preempt_count_ptr())) \
-		preempt_schedule(); \
-} while (0)
-
-#ifdef CONFIG_CONTEXT_TRACKING
-
-void preempt_schedule_context(void);
-
-#define preempt_check_resched_context() \
-do { \
-	if (unlikely(!*preempt_count_ptr())) \
-		preempt_schedule_context(); \
-} while (0)
-#else
-
-#define preempt_check_resched_context() preempt_check_resched()
-
-#endif /* CONFIG_CONTEXT_TRACKING */
-
-#else /* !CONFIG_PREEMPT */
-
-#define preempt_check_resched()		do { } while (0)
-#define preempt_check_resched_context()	do { } while (0)
-
-#endif /* CONFIG_PREEMPT */
+#define __preempt_count_inc() __preempt_count_add(1)
+#define __preempt_count_dec() __preempt_count_sub(1)
 
+#define preempt_count_inc() preempt_count_add(1)
+#define preempt_count_dec() preempt_count_sub(1)
 
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
 do { \
-	inc_preempt_count(); \
+	preempt_count_inc(); \
 	barrier(); \
 } while (0)
 
 #define sched_preempt_enable_no_resched() \
 do { \
 	barrier(); \
-	dec_preempt_count(); \
+	preempt_count_dec(); \
 } while (0)
 
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
+#ifdef CONFIG_PREEMPT
+asmlinkage void preempt_schedule(void);
 #define preempt_enable() \
 do { \
-	preempt_enable_no_resched(); \
-	preempt_check_resched(); \
+	barrier(); \
+	if (unlikely(preempt_count_dec_and_test())) \
+		preempt_schedule(); \
 } while (0)
 
-/* For debugging and tracer internals only! */
-#define add_preempt_count_notrace(val) \
-	do { *preempt_count_ptr() += (val); } while (0)
-#define sub_preempt_count_notrace(val) \
-	do { *preempt_count_ptr() -= (val); } while (0)
-#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
-#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
+#define preempt_check_resched() \
+do { \
+	if (should_resched()) \
+		preempt_schedule(); \
+} while (0)
+
+#else
+#define preempt_enable() preempt_enable_no_resched()
+#define preempt_check_resched() do { } while (0)
+#endif
 
 #define preempt_disable_notrace() \
 do { \
-	inc_preempt_count_notrace(); \
+	__preempt_count_inc(); \
 	barrier(); \
 } while (0)
 
 #define preempt_enable_no_resched_notrace() \
 do { \
 	barrier(); \
-	dec_preempt_count_notrace(); \
+	__preempt_count_dec(); \
 } while (0)
 
-/* preempt_check_resched is OK to trace */
+#ifdef CONFIG_PREEMPT
+
+#ifdef CONFIG_CONTEXT_TRACKING
+asmlinkage void preempt_schedule_context(void);
+#else
+#define preempt_schedule_context() preempt_schedule()
+#endif
+
 #define preempt_enable_notrace() \
 do { \
-	preempt_enable_no_resched_notrace(); \
-	preempt_check_resched_context(); \
+	barrier(); \
+	if (unlikely(__preempt_count_dec_and_test())) \
+		preempt_schedule_context(); \
 } while (0)
+#else
+#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#endif
 
 #else /* !CONFIG_PREEMPT_COUNT */
 
@@ -118,10 +107,11 @@ do { \
  * that can cause faults and scheduling migrate into our preempt-protected
  * region.
  */
 #define preempt_disable() barrier()
 #define sched_preempt_enable_no_resched() barrier()
 #define preempt_enable_no_resched() barrier()
 #define preempt_enable() barrier()
+#define preempt_check_resched() do { } while (0)
 
 #define preempt_disable_notrace() barrier()
 #define preempt_enable_no_resched_notrace() barrier()
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9fa151fb968e..06ac17c7e639 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2409,11 +2409,6 @@ static inline int signal_pending_state(long state, struct task_struct *p)
 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
 }
 
-static inline int need_resched(void)
-{
-	return unlikely(test_preempt_need_resched());
-}
-
 /*
  * cond_resched() and cond_resched_lock(): latency reduction via
  * explicit rescheduling in places that are safe. The return
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951e1855..9d8cf056e661 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -15,7 +15,7 @@
  */
 static inline void pagefault_disable(void)
 {
-	inc_preempt_count();
+	preempt_count_inc();
 	/*
 	 * make sure to have issued the store before a pagefault
 	 * can hit.
@@ -30,11 +30,7 @@ static inline void pagefault_enable(void)
 	 * the pagefault handler again.
 	 */
 	barrier();
-	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
-	barrier();
+	preempt_count_dec();
 	preempt_check_resched();
 }
 